Example #1
class CompositeCrackBridge(HasTraits):

    reinforcement_lst = List(Instance(Reinforcement))
    w = Float
    E_m = Float
    Ll = Float
    Lr = Float

    V_f_tot = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_V_f_tot(self):
        V_f_tot = 0.0
        for reinf in self.reinforcement_lst:
            V_f_tot += reinf.V_f
        return V_f_tot

    E_c = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_E_c(self):
        E_fibers = 0.0
        for reinf in self.reinforcement_lst:
            E_fibers += reinf.V_f * reinf.E_f
        E_c = self.E_m * (1. - self.V_f_tot) + E_fibers
        return E_c * (1. + 1e-15)

    sorted_reinf_lst = Property(Tuple(List, List),
                                depends_on='reinforcement_lst')

    @cached_property
    def _get_sorted_reinf_lst(self):
        cont_reinf_lst = []
        short_reinf_lst = []
        for reinf in self.reinforcement_lst:
            if reinf.__class__ == ContinuousFibers:
                cont_reinf_lst.append(reinf)
            elif reinf.__class__ == ShortFibers:
                short_reinf_lst.append(reinf)
        return cont_reinf_lst, short_reinf_lst

    cont_fibers_instance = Instance(CrackBridgeContFibers)

    def _cont_fibers_instance_default(self):
        return CrackBridgeContFibers()

    cont_fibers = Property(Instance(CrackBridgeContFibers),
                           depends_on='reinforcement_lst+,Ll,Lr,E_m,w')

    @cached_property
    def _get_cont_fibers(self):
        cbcf = self.cont_fibers_instance
        cbcf.w = self.w
        cbcf.Ll = self.Ll
        cbcf.Lr = self.Lr
        cbcf.E_m = self.E_m
        cbcf.E_c = self.E_c
        cbcf.cont_reinf_lst = self.sorted_reinf_lst[0]
        return cbcf

    short_fibers_instance = Instance(CrackBridgeShortFibers)

    def _short_fibers_instance_default(self):
        return CrackBridgeShortFibers()

    short_fibers = Property(Instance(CrackBridgeShortFibers),
                            depends_on='reinforcement_lst+,E_m,w')

    @cached_property
    def _get_short_fibers(self):
        cbsf = self.short_fibers_instance
        cbsf.w = self.w
        cbsf.E_m = self.E_m
        cbsf.E_c = self.E_c
        cbsf.short_reinf_lst = self.sorted_reinf_lst[1]
        return cbsf

    _x_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__x_arr(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            added_x = np.hstack(
                (self.cont_fibers.x_arr, self.short_fibers.x_arr))
            sorted_unique_x = np.unique(added_x)
            return sorted_unique_x
        elif len(self.sorted_reinf_lst[0]) != 0:
            return self.cont_fibers.x_arr
        elif len(self.sorted_reinf_lst[1]) != 0:
            return self.short_fibers.x_arr

    _epsm_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsm_arr(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            epsm_cont_interp = MFnLineArray(xdata=self.cont_fibers.x_arr,
                                            ydata=self.cont_fibers.epsm_arr)
            epsm_short_interp = MFnLineArray(xdata=self.short_fibers.x_arr,
                                             ydata=self.short_fibers.epsm_arr)
            added_epsm_cont = self.cont_fibers.epsm_arr + epsm_short_interp.get_values(
                self.cont_fibers.x_arr)
            added_epsm_short = self.short_fibers.epsm_arr + epsm_cont_interp.get_values(
                self.short_fibers.x_arr)
            sorted_unique_idx = np.unique(np.hstack(
                (self.cont_fibers.x_arr, self.short_fibers.x_arr)),
                                          return_index=True)[1]
            return np.hstack(
                (added_epsm_cont, added_epsm_short))[sorted_unique_idx]
        elif len(self.sorted_reinf_lst[0]) != 0:
            return self.cont_fibers.epsm_arr
        elif len(self.sorted_reinf_lst[1]) != 0:
            self.short_fibers.w = self.w
            return self.short_fibers.epsm_arr

    _epsf_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf_arr(self):
        ''' only for continuous reinforcement '''
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) == 0:
            self.cont_fibers.w = self.w
            return self.cont_fibers.epsf_arr
        else:
            raise ValueError('epsf can only be computed for continuous fibers')

    _epsf0_arr = Property(Array, depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            epsf0_cont = self.cont_fibers.epsf0_arr
            epsf0_short = self.short_fibers.epsf0_arr
        elif len(self.sorted_reinf_lst[0]) != 0:
            epsf0_cont = self.cont_fibers.epsf0_arr
            epsf0_short = np.array([])
        elif len(self.sorted_reinf_lst[1]) != 0:
            epsf0_cont = np.array([])
            epsf0_short = self.short_fibers.epsf0_arr
        return epsf0_cont, epsf0_short

    _epsf0_arr_cont = Property(Array,
                               depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr_cont(self):
        return self._epsf0_arr[0]

    _epsf0_arr_short = Property(Array,
                                depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get__epsf0_arr_short(self):
        return self._epsf0_arr[1]

    sigma_c = Property(depends_on='w,E_m,Ll,Lr,reinforcement_lst+')

    @cached_property
    def _get_sigma_c(self):
        if len(self.sorted_reinf_lst[0]) != 0 and len(
                self.sorted_reinf_lst[1]) != 0:
            sigma_c_cont = np.sum(
                self._epsf0_arr_cont * self.cont_fibers.sorted_stats_weights *
                self.cont_fibers.sorted_V_f * self.cont_fibers.sorted_nu_r *
                self.cont_fibers.sorted_E_f * (1. - self.cont_fibers.damage))
            sigma_c_short = np.sum(self._epsf0_arr_short *
                                   self.short_fibers.sorted_V_f *
                                   self.short_fibers.sorted_E_f)
        elif len(self.sorted_reinf_lst[0]) != 0:
            sigma_c_cont = np.sum(
                self._epsf0_arr_cont * self.cont_fibers.sorted_stats_weights *
                self.cont_fibers.sorted_V_f * self.cont_fibers.sorted_nu_r *
                self.cont_fibers.sorted_E_f * (1. - self.cont_fibers.damage))
            sigma_c_short = 0.0
        elif len(self.sorted_reinf_lst[1]) != 0:
            sigma_c_cont = 0.0
            sigma_c_short = np.sum(self._epsf0_arr_short *
                                   self.short_fibers.sorted_V_f *
                                   self.short_fibers.sorted_E_f)
        return sigma_c_cont + sigma_c_short
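
A standalone sketch of the rule-of-mixtures logic behind _get_V_f_tot and _get_E_c, using hypothetical fiber volume fractions and moduli in place of the Reinforcement objects (all values and names below are invented for illustration):

# rule-of-mixtures sketch with made-up numbers
E_m = 25e3                      # matrix modulus [MPa], assumed value
reinf = [(0.01, 180e3),         # (V_f, E_f) of a hypothetical fiber type
         (0.005, 70e3)]         # a second hypothetical fiber type

V_f_tot = sum(V_f for V_f, E_f in reinf)
E_fibers = sum(V_f * E_f for V_f, E_f in reinf)
E_c = E_m * (1. - V_f_tot) + E_fibers
print('V_f_tot = %.4f, E_c = %.1f MPa' % (V_f_tot, E_c))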
Example #2
class RIDVariable(HasTraits):
    """
    Association between a random variable and distribution.
    """

    title = Str('RIDvariable')

    s = WeakRef

    rf = WeakRef

    n_int = Int(20,
                enter_set=True,
                auto_set=False,
                desc='Number of integration points')

    def _n_int_changed(self):
        if self.pd:
            self.pd.n_segments = self.n_int

    # should this variable be randomized

    random = Bool(False, randomization_changed=True)

    def _random_changed(self):
        # get the default distribution
        if self.random:
            self.s.rv_dict[self.varname] = RV(pd=self.pd,
                                              name=self.varname,
                                              n_int=self.n_int)
        else:
            del self.s.rv_dict[self.varname]

    # name of the random variable (within the response function)
    #
    varname = String

    source_trait = Trait

    trait_value = Float

    pd = Property(Instance(IPDistrib), depends_on='random')

    @cached_property
    def _get_pd(self):
        if self.random:
            tr = self.rf.trait(self.varname)
            pd = PDistrib(distr_choice=tr.distr[0], n_segments=self.n_int)
            trait = self.rf.trait(self.varname)

            # get the distribution parameters from the metadata
            #
            distr_params = {
                'scale': trait.scale,
                'loc': trait.loc,
                'shape': trait.shape
            }
            dparams = {}
            for key, val in list(distr_params.items()):
                if val:
                    dparams[key] = val

            pd.distr_type.set(**dparams)
            return pd
        else:
            return None

    value = Property

    def _get_value(self):
        if self.random:
            return ''
        else:
            return '%g' % self.trait_value

    # --------------------------------------------

    # default view specification
    def default_traits_view(self):
        return View(HGroup(Item(
            'n_int',
            visible_when='random',
            label='NIP',
        ),
                           Spring(),
                           show_border=True,
                           label='Variable name: %s' % self.varname),
                    Item('pd@', show_label=False),
                    resizable=True,
                    id='rid_variable',
                    height=800)
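
The _get_pd property builds a distribution from the trait metadata and forwards only the parameters that are actually set. A minimal sketch of that filtering pattern with a hypothetical metadata dict (PDistrib itself is not needed for the illustration):

# hypothetical parameters as they would come from rf.trait(varname)
distr_params = {'scale': 2.0, 'loc': None, 'shape': 0.5}

dparams = {}
for key, val in list(distr_params.items()):
    if val:
        dparams[key] = val

print(dparams)  # {'scale': 2.0, 'shape': 0.5} -- would be passed to pd.distr_type.set(**dparams)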
Example #3
class CompositeCrackBridgeLoop(HasTraits):

    reinforcement_lst = List(Instance(Reinforcement))
    w = Float
    E_m = Float
    Ll = Float
    Lr = Float

    V_f_tot = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_V_f_tot(self):
        V_f_tot = 0.0
        for reinf in self.reinforcement_lst:
            V_f_tot += reinf.V_f
        return V_f_tot

    E_c = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_E_c(self):
        E_fibers = 0.0
        for reinf in self.reinforcement_lst:
            E_fibers += reinf.V_f * reinf.E_f
        return self.E_m * (1. - self.V_f_tot) + E_fibers

    sorted_theta = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_theta(self):
        '''sorts the integral points by bond in descending order'''
        depsf_arr = np.array([])
        V_f_arr = np.array([])
        E_f_arr = np.array([])
        xi_arr = np.array([])
        stat_weights_arr = np.array([])
        nu_r_arr = np.array([])
        for reinf in self.reinforcement_lst:
            n_int = len(np.hstack((np.array([]), reinf.depsf_arr)))
            depsf_arr = np.hstack((depsf_arr, reinf.depsf_arr))
            V_f_arr = np.hstack((V_f_arr, np.repeat(reinf.V_f, n_int)))
            E_f_arr = np.hstack((E_f_arr, np.repeat(reinf.E_f, n_int)))
            xi_arr = np.hstack((xi_arr, np.repeat(reinf.xi, n_int)))
            stat_weights_arr = np.hstack(
                (stat_weights_arr, np.repeat(reinf.stat_weights, n_int)))
            nu_r_arr = np.hstack((nu_r_arr, reinf.nu_r))
        argsort = np.argsort(depsf_arr)[::-1]
        return depsf_arr[argsort], V_f_arr[argsort], E_f_arr[argsort], \
                xi_arr[argsort],  stat_weights_arr[argsort], \
                nu_r_arr[argsort]

    sorted_depsf = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_depsf(self):
        return self.sorted_theta[0]

    sorted_V_f = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_V_f(self):
        return self.sorted_theta[1]

    sorted_E_f = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_E_f(self):
        return self.sorted_theta[2]

    sorted_xi = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_xi(self):
        return self.sorted_theta[3]

    sorted_stats_weights = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_stats_weights(self):
        return self.sorted_theta[4]

    sorted_nu_r = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_nu_r(self):
        return self.sorted_theta[5]

    sorted_xi_cdf = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_xi_cdf(self):
        '''breaking strain: CDF for random and Heaviside for discrete values'''
        # TODO: does not work for reinforcement types with the same xi
        methods = []
        masks = []
        for reinf in self.reinforcement_lst:
            masks.append(self.sorted_xi == reinf.xi)
            if isinstance(reinf.xi, FloatType):
                methods.append(lambda x: 1.0 * (reinf.xi <= x))
            elif isinstance(reinf.xi, RV):
                methods.append(reinf.xi._distr.cdf)
            elif isinstance(reinf.xi, WeibullFibers):
                methods.append(reinf.xi.weibull_fibers_Pf)
        return methods, masks

    def vect_xi_cdf(self, epsy, x_short, x_long):
        Pf = np.zeros_like(self.sorted_depsf)
        methods, masks = self.sorted_xi_cdf
        for i, method in enumerate(methods):
            if method.__name__ == 'weibull_fibers_Pf':
                Pf += method(epsy * masks[i],
                             self.sorted_depsf,
                             x_short=x_short,
                             x_long=x_long)
            else:
                Pf += method(epsy * masks[i])
        return Pf

    def dem_depsf(self, depsf, damage):
        '''evaluates the deps_m given deps_f
        at that point and the damage array'''
        Kf = self.sorted_V_f * self.sorted_nu_r * \
            self.sorted_stats_weights * self.sorted_E_f
        Kf_intact_bonded = np.sum(Kf * (depsf <= self.sorted_depsf) *
                                  (1. - damage))
        Kf_broken = np.sum(Kf * damage)
        Kf_add = Kf_intact_bonded + Kf_broken
        Km = (1. - self.V_f_tot) * self.E_m
        E_mtrx = Km + Kf_add
        mean_acting_T = np.sum(self.sorted_depsf *
                               (self.sorted_depsf < depsf) * Kf *
                               (1. - damage))
        return mean_acting_T / E_mtrx

    def double_sided(self, defi, x0, demi, em0, um0, damage):
        dxi = (-defi * x0 - demi * x0 +
               (defi * x0**2 * demi + demi**2 * x0**2 - 2 * defi * em0 * x0 +
                2 * defi * um0 + defi * self.w - 2 * demi * em0 * x0 +
                2 * demi * um0 + demi * self.w)**(.5)) / (defi + demi)
        dem = self.dem_depsf(defi, damage)
        emi = em0 + demi * dxi
        umi = um0 + (em0 + emi) * dxi / 2.
        return dxi, dem, emi, umi

    def one_sided(self, defi, x0, demi, em0, um0, clamped, damage):
        w = self.w
        xs = clamped[0]
        ums = clamped[1]
        dxi = (-xs * demi - demi * x0 - defi * xs - defi * x0 +
               (2 * demi * x0 * defi * xs + demi * x0**2 * defi +
                2 * demi**2 * x0 * xs + 3 * defi * xs**2 * demi -
                2 * demi * xs * em0 - 2 * demi * em0 * x0 -
                2 * defi * xs * em0 - 2 * defi * em0 * x0 + demi**2 * x0**2 +
                2 * defi**2 * xs**2 + xs**2 * demi**2 + 2 * demi * um0 +
                2 * demi * ums + 2 * demi * w + 2 * defi * um0 +
                2 * defi * ums + 2 * defi * w)**(0.5)) / (demi + defi)
        dem = self.dem_depsf(defi, damage)
        emi = em0 + demi * dxi
        umi = um0 + (em0 + emi) * dxi / 2.
        return dxi, dem, emi, umi

    def clamped(self, defi, xs, xl, ems, eml, ums, uml):
        c1 = eml * xl - uml
        c2 = ems * xs - ums
        c3 = defi * xl**2 / 2.
        c4 = defi * xs**2 / 2.
        c5 = (defi * (xl - xs) + (eml - ems)) * xs
        h = (self.w - c1 - c2 - c3 - c4 - c5) / (xl + xs)
        return defi * xl + eml + h

    def damage_residuum(self, iter_damage):
        um_short, em_short, x_short = [0.0], [0.0], [0.0]
        um_long, em_long, x_long = [0.0], [0.0], [0.0]
        init_dem = self.dem_depsf(np.infty, iter_damage)
        dem_short = [init_dem]
        dem_long = [init_dem]
        epsf0 = np.zeros_like(self.sorted_depsf)
        Lmin = min(self.Ll, self.Lr)
        Lmax = max(self.Ll, self.Lr)
        for i, defi in enumerate(self.sorted_depsf):
            if x_short[-1] < Lmin and x_long[-1] < Lmax:
                # double sided pullout
                dxi, dem, emi, umi = self.double_sided(defi, x_short[-1],
                                                       dem_short[-1],
                                                       em_short[-1],
                                                       um_short[-1],
                                                       iter_damage)
                if x_short[-1] + dxi < Lmin:
                    # dx increment does not reach the boundary
                    dem_short.append(dem)
                    dem_long.append(dem)
                    x_short.append(x_short[-1] + dxi)
                    x_long.append(x_long[-1] + dxi)
                    em_short.append(emi)
                    em_long.append(emi)
                    um_short.append(umi)
                    um_long.append(umi)
                    epsf0[i] = (em_short[-1] + x_short[-1] * defi)
                else:
                    # boundary reached at shorter side
                    deltax = Lmin - x_short[-1]
                    x_short.append(Lmin)
                    em_short.append(em_short[-1] + dem_short[-1] * deltax)
                    um_short.append(um_short[-1] +
                                    (em_short[-2] + em_short[-1]) * deltax /
                                    2.)
                    short_side = [x_short[-1], um_short[-1]]
                    dxi, dem, emi, umi = self.one_sided(
                        defi, x_long[-1], dem_long[-1], em_long[-1],
                        um_long[-1], short_side, iter_damage)

                    if x_long[-1] + dxi >= Lmax:
                        # boundary reached at longer side
                        deltax = Lmax - x_long[-1]
                        x_long.append(Lmax)
                        em_long.append(em_long[-1] + dem_long[-1] * deltax)
                        um_long.append(um_long[-1] +
                                       (em_long[-2] + em_long[-1]) * deltax /
                                       2.)
                        epsf0_clamped = self.clamped(defi, x_short[-1],
                                                     x_long[-1], em_short[-1],
                                                     em_long[-1], um_short[-1],
                                                     um_long[-1])
                        epsf0[i] = epsf0_clamped
                    else:
                        dem_long.append(dem)
                        x_long.append(x_long[-1] + dxi)
                        em_long.append(emi)
                        um_long.append(umi)
                        epsf0[i] = (em_long[-1] + x_long[-1] * defi)

            elif x_short[-1] == Lmin and x_long[-1] < Lmax:
                #one sided pullout
                clamped = [x_short[-1], um_short[-1]]
                dxi, dem, emi, umi = self.one_sided(defi, x_long[-1],
                                                    dem_long[-1], em_long[-1],
                                                    um_long[-1], clamped,
                                                    iter_damage)
                if x_long[-1] + dxi < Lmax:
                    dem_long.append(dem)
                    x_long.append(x_long[-1] + dxi)
                    em_long.append(emi)
                    um_long.append(umi)
                    epsf0[i] = (em_long[-1] + x_long[-1] * defi)
                else:
                    dxi = Lmax - x_long[-1]
                    x_long.append(Lmax)
                    em_long.append(em_long[-1] + dem_long[-1] * dxi)
                    um_long.append(um_long[-1] +
                                   (em_long[-2] + em_long[-1]) * dxi / 2.)
                    epsf0_clamped = self.clamped(defi, x_short[-1], x_long[-1],
                                                 em_short[-1], em_long[-1],
                                                 um_short[-1], um_long[-1])
                    epsf0[i] = epsf0_clamped

            elif x_short[-1] == Lmin and x_long[-1] == Lmax:
                #clamped fibers
                epsf0_clamped = self.clamped(defi, x_short[-1], x_long[-1],
                                             em_short[-1], em_long[-1],
                                             um_short[-1], um_long[-1])
                epsf0[i] = epsf0_clamped
        self._x_arr = np.hstack(
            (-np.array(x_short)[::-1][:-1], np.array(x_long)))
        self._epsm_arr = np.hstack(
            (np.array(em_short)[::-1][:-1], np.array(em_long)))
        self._epsf0_arr = epsf0
        residuum = self.vect_xi_cdf(epsf0, x_short=x_short,
                                    x_long=x_long) - iter_damage
        return residuum

    _x_arr = Array

    def __x_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    _epsm_arr = Array

    def __epsm_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    _epsf0_arr = Array

    def __epsf0_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    damage = Property(depends_on='w, Ll, Lr, reinforcement_lst+')

    @cached_property
    def _get_damage(self):
        ff = time.clock()
        if self.w == 0.:
            damage = np.zeros_like(self.sorted_depsf)
        else:
            ff = time.clock()
            try:
                damage = broyden2(self.damage_residuum,
                                  0.2 * np.ones_like(self.sorted_depsf),
                                  maxiter=20)
            except Exception:
                print('broyden2 does not converge fast enough:'
                      ' switched to fsolve for this step')
                damage = fsolve(self.damage_residuum,
                                0.2 * np.ones_like(self.sorted_depsf))
            print('damage =', np.sum(damage) / len(damage),
                  'iteration time =', time.clock() - ff, 'sec')
        return damage
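
The damage iteration above first tries the quasi-Newton solver broyden2 and falls back to fsolve when it does not converge within the iteration limit. A self-contained sketch of the same strategy on a toy residual (the residual and starting vector are invented; damage_residuum is not reproduced):

import numpy as np
from scipy.optimize import broyden2, fsolve

def toy_residuum(x):
    # stand-in for damage_residuum; the root of cos(x) - x is near 0.739
    return np.cos(x) - x

x0 = 0.2 * np.ones(5)
try:
    damage = broyden2(toy_residuum, x0, maxiter=20)
except Exception:
    print('broyden2 does not converge fast enough: switched to fsolve')
    damage = fsolve(toy_residuum, x0)
print(damage)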
Example #4
class CBView(ModelView):
    def __init__(self, **kw):
        super(CBView, self).__init__(**kw)
        self.on_trait_change(self.refresh, 'model.+params')
        self.refresh()

    model = Instance(Model)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    figure2 = Instance(Figure)

    def _figure2_default(self):
        figure = Figure(facecolor='white')
        return figure

    data_changed = Event

    def plot(self, fig, fig2):
        figure = fig
        figure.clear()
        axes = figure.gca()
        # plot model response vs. experiment
        axes.plot(self.model.w, self.model.model_rand, lw=2.0, color='blue', \
                  label='model')
        axes.plot(self.model.w, self.model.interpolate_experiment(self.model.w), lw=1.0, color='black', \
                  label='experiment')
        axes.legend(loc='best')


#         figure2 = fig2
#         figure2.clear()
#         axes = figure2.gca()
#         # plot PDF
#         axes.plot(self.model.w2, self.model.model_extrapolate, lw=2.0, color='red', \
#                   label='model')
#         axes.legend()

    def refresh(self):
        self.plot(self.figure, self.figure2)
        self.data_changed = True

    traits_view = View(HSplit(VGroup(
        Group(
            Item('model.tau_scale'),
            Item('model.tau_shape'),
            Item('model.tau_loc'),
            Item('model.m'),
            Item('model.sV0'),
            Item('model.Ef'),
            Item('model.w_min'),
            Item('model.w_max'),
            Item('model.w_pts'),
            Item('model.n_int'),
            Item('model.w2_min'),
            Item('model.w2_max'),
            Item('model.w2_pts'),
            Item('model.sigmamu'),
        ),
        id='pdistrib.distr_type.pltctrls',
        label='Distribution parameters',
        scrollable=True,
    ),
                              Tabbed(
                                  Group(
                                      Item('figure',
                                           editor=MPLFigureEditor(),
                                           show_label=False,
                                           resizable=True),
                                      scrollable=True,
                                      label='Plot',
                                  ),
                                  label='Plot',
                                  id='pdistrib.figure.params',
                                  dock='tab',
                              ),
                              Tabbed(
                                  Group(
                                      Item('figure2',
                                           editor=MPLFigureEditor(),
                                           show_label=False,
                                           resizable=True),
                                      scrollable=True,
                                      label='Plot',
                                  ),
                                  label='Plot',
                                  id='pdistrib.figure2',
                                  dock='tab',
                              ),
                              dock='tab',
                              id='pdistrib.figure.view'),
                       id='pdistrib.view',
                       dock='tab',
                       title='Statistical distribution',
                       buttons=[OKButton, CancelButton],
                       scrollable=True,
                       resizable=True,
                       width=600,
                       height=400)
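
The plot() method overlays the model response and the interpolated experiment on one Axes of a matplotlib Figure. A minimal non-GUI sketch of the same pattern with invented data (no Traits model; the Agg backend and the output file name are assumptions):

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

w = np.linspace(0., 1., 50)
model_rand = np.sqrt(w)             # hypothetical model response
experiment = np.sqrt(w) + 0.02      # hypothetical measured curve

figure = Figure(facecolor='white')
FigureCanvasAgg(figure)             # attach a canvas so the figure can be rendered
axes = figure.gca()
axes.plot(w, model_rand, lw=2.0, color='blue', label='model')
axes.plot(w, experiment, lw=1.0, color='black', label='experiment')
axes.legend(loc='best')
figure.savefig('cb_view_sketch.png')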
Example #5
class RFView3D(ModelView):

    model = Instance(IRF)

    scalar_arr = Property(depends_on='var_enum')

    @cached_property
    def _get_scalar_arr(self):
        return getattr(self.data, self.var_enum_)

    color_map = Str('blue-red')

    scene = Instance(MlabSceneModel, ())
    plot = Instance(PipelineBase)

    # When the scene is activated or parameters change the scene is updated
    @on_trait_change('model.')
    def update_plot(self):

        x_arrr, y_arrr, z_arrr = self.data.cut_data[0:3]
        scalar_arrr = self.scalar_arr

        mask = y_arrr > -1

        x = x_arrr[mask]
        y = y_arrr[mask]
        z = z_arrr[mask]
        scalar = scalar_arrr[mask]

        connections = -ones_like(x_arrr)
        mesk = x_arrr.filled() > -1
        connections[mesk] = list(range(0, len(connections[mesk])))
        connections = connections[self.start_fib:self.end_fib + 1, :].filled()
        connection = connections.astype(int).copy()
        connection = connection.tolist()

        # TODO: better
        for i in range(0, self.data.n_cols + 1):
            for item in connection:
                try:
                    item.remove(-1)
                except ValueError:
                    pass

        if self.plot is None:
            print('plot 3d -- 1')
            #self.scene.parallel_projection = False
            pts = self.scene.mlab.pipeline.scalar_scatter(
                array(x), array(y), array(z), array(scalar))
            pts.mlab_source.dataset.lines = connection
            self.plot = self.scene.mlab.pipeline.surface(
                self.scene.mlab.pipeline.tube(
                    #                        fig.scene.mlab.pipeline.stripper(
                    pts,
                    figure=self.scene.mayavi_scene,
                    #                        ),
                    tube_sides=10,
                    tube_radius=0.015,
                ), )
            self.plot.actor.mapper.interpolate_scalars_before_mapping = True
            self.plot.module_manager.scalar_lut_manager.show_scalar_bar = True
            self.plot.module_manager.scalar_lut_manager.show_legend = True
            self.plot.module_manager.scalar_lut_manager.shadow = True
            self.plot.module_manager.scalar_lut_manager.label_text_property.italic = False

            self.plot.module_manager.scalar_lut_manager.scalar_bar.orientation = 'horizontal'
            self.plot.module_manager.scalar_lut_manager.scalar_bar_representation.position2 = array(
                [0.61775334, 0.17])
            self.plot.module_manager.scalar_lut_manager.scalar_bar_representation.position = array(
                [0.18606834, 0.08273163])
            self.plot.module_manager.scalar_lut_manager.scalar_bar.width = 0.17000000000000004

            self.plot.module_manager.scalar_lut_manager.lut_mode = self.color_map  #'black-white'
            self.plot.module_manager.scalar_lut_manager.data_name = self.var_enum
            self.plot.module_manager.scalar_lut_manager.label_text_property.font_family = 'times'
            self.plot.module_manager.scalar_lut_manager.label_text_property.shadow = True
            self.plot.module_manager.scalar_lut_manager.title_text_property.color = (
                0.0, 0.0, 0.0)
            self.plot.module_manager.scalar_lut_manager.label_text_property.color = (
                0.0, 0.0, 0.0)
            self.plot.module_manager.scalar_lut_manager.title_text_property.font_family = 'times'
            self.plot.module_manager.scalar_lut_manager.title_text_property.shadow = True

            #fig.scene.parallel_projection = True
            self.scene.scene.background = (1.0, 1.0, 1.0)
            self.scene.scene.camera.position = [
                16.319534155794827, 10.477447863842627, 6.1717943847883232
            ]
            self.scene.scene.camera.focal_point = [
                3.8980860486356859, 2.4731178194274621, 0.14856957086692035
            ]
            self.scene.scene.camera.view_angle = 30.0
            self.scene.scene.camera.view_up = [
                -0.27676100729835512, -0.26547169369097656, 0.92354107904740446
            ]
            self.scene.scene.camera.clipping_range = [
                7.7372124315754673, 26.343575352248056
            ]
            self.scene.scene.camera.compute_view_plane_normal()
            #fig.scene.reset_zoom()

            axes = Axes()
            self.scene.engine.add_filter(axes, self.plot)
            axes.label_text_property.font_family = 'times'
            axes.label_text_property.shadow = True
            axes.title_text_property.font_family = 'times'
            axes.title_text_property.shadow = True
            axes.property.color = (0.0, 0.0, 0.0)
            axes.title_text_property.color = (0.0, 0.0, 0.0)
            axes.label_text_property.color = (0.0, 0.0, 0.0)
            axes.axes.corner_offset = .1
            axes.axes.x_label = 'x'
            axes.axes.y_label = 'y'
            axes.axes.z_label = 'z'
        else:
            print('plot 3d -- 2')
            #self.plot.mlab_source.dataset.reset()
            #self.plot.mlab_source.set( x = x, y = y, z = z, scalars = scalar )
            #self.plot.mlab_source.dataset.points = array( [x, y, z] ).T
            self.plot.mlab_source.scalars = scalar
            self.plot.mlab_source.dataset.lines = connection
            self.plot.module_manager.scalar_lut_manager.data_name = self.var_enum

    # The layout of the dialog created
    view = View(
        Item('scene',
             editor=SceneEditor(scene_class=MayaviScene),
             height=250,
             width=300,
             show_label=False),
        Group(
            '_',
            'start_fib',
            'end_fib',
            'var_enum',
        ),
        resizable=True,
    )
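
update_plot() derives the line connectivity for the tube plot from a masked coordinate array: valid points get consecutive global indices, masked entries become -1 and are stripped afterwards. A numpy-only sketch of that indexing step with invented data (no Mayavi involved):

import numpy as np

# two hypothetical fibers with three sample points each; -9. marks a missing point
x_arrr = np.ma.masked_values([[0., 1., 2.],
                              [3., 4., -9.]], -9.)
connections = -np.ones(x_arrr.shape)
valid = x_arrr.filled(-9.) > -1
connections[valid] = np.arange(valid.sum())    # consecutive point indices
connection = [[int(i) for i in row if i > -1] for row in connections]
print(connection)                              # [[0, 1, 2], [3, 4]]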
Example #6
class SFBMushRoofModel(IBVModel):
    '''SFB - Demonstrator model specification.
    '''
    implements(ISimModel)

    # number of elements in all dims

    n_elems_xy = Int(10, ps_levels=(20, 80, 3))

    n_dofs_xy = Property(Int, depends_on='+ps_levels')

    def _get_n_dofs_xy(self):
        if self.fets == self.fe_2D_linear:
            return self.n_elems_xy + 1
        elif self.fets == self.fe_2D_quadratic:
            return int(self.n_elems_xy * 2)
        else:
            raise ValueError

    rtrace_list = Property(List, depends_on='+ps_levels')

    @cached_property
    def _get_rtrace_list(self):
        return [self.max_princ_stress, self.sig_app, self.u]


#    sig_trace = RTraceDomainListField( name = 'Stress' ,
#                               var = 'sig_app', warp = False,
#                               record_on = 'update' )
#    eps_trace = RTraceDomainListField( name = 'Epsilon' ,
#                                       var = 'eps_app', warp = True,
#                                       record_on = 'update' )
#    eps0_trace = RTraceDomainListField( name = 'Epsilon 0' ,
#                                       var = 'eps0_app', warp = True,
#                                       record_on = 'update' )
#    eps1t_trace = RTraceDomainListField( name = 'Epsilon 1-t' ,
#                                       var = 'eps1t_app', warp = True,
#                                       record_on = 'update' )
#    u_trace = RTraceDomainListField( name = 'Displacement' ,
#                                       var = 'u', warp = True,
#                                       record_on = 'update' )

# dimensions of the shell structure

    length_xy = Float(1.)  # [m]

    E = Float(30000)  # [MPa]
    nu = Float(0.2)  # [-]
    alpha = Float(1e-3)
    # variable type of the finite element
    fets = Instance(FETSEval, ps_levels=['fe_2D_linear', 'fe_2D_quadratic'])

    def _fets_default(self):
        return self.fe_2D_quadratic

    mats = Instance(MATS2DElastic)

    def _mats_default(self):
        return MATS2DElastic(E=self.E,
                             nu=self.nu,
                             initial_strain=TemperatureLinFn(
                                 length=self.length_xy,
                                 n_dims=2,
                                 T_right=50,
                                 T_left=50,
                                 offset=0.5,
                                 alpha=self.alpha))

    fe_2D_linear = Instance(FETSEval, transient=True)

    def _fe_2D_linear_default(self):
        return FETS2D4Q(mats_eval=self.mats)

    fe_2D_quadratic = Instance(FETSEval, transient=True)

    def _fe_2D_quadratic_default(self):
        return FETS2D4Q8U(mats_eval=self.mats)

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='u_z_free_corner', unit='m'),
            SimOut(name='max principle stress', unit='MPa'),
            SimOut(name='max sig_yy', unit='MPa'),
            SimOut(name='max sig_xx', unit='MPa'),
        ]

    def peval(self):
        '''
        Evaluate the model and return the array of results specified
        in the method get_sim_outputs.
        '''
        U = self.tloop.eval()
        u_corner = U[self.center_top_dof][-1, -1, 0]
        max_princ_stress = max(
            self.max_princ_stress._get_field_data().flatten())

        max_sig_yy = max(self.sig_app._get_field_data()[:, 4])
        max_sig_xx = max(self.sig_app._get_field_data()[:, 0])

        return array([u_corner, max_princ_stress, max_sig_yy, max_sig_xx],
                     dtype='float_')

    tline = Instance(TLine)

    def _tline_default(self):
        return TLine(min=0.0, step=1.0, max=1.0)

    max_princ_stress = Property(Instance(RTraceDomainListField),
                                depends_on='+ps_levels')

    @cached_property
    def _get_max_princ_stress(self):
        return RTraceDomainListField(
            name='max principle stress',
            idx=0,
            #                                   position = 'int_pnts',
            var='max_principle_sig',
            record_on='update',
        )

    sig_app = Property(Instance(RTraceDomainListField),
                       depends_on='+ps_levels')

    @cached_property
    def _get_sig_app(self):
        return RTraceDomainListField(
            name='sig_app',
            #                                  position = 'int_pnts',
            var='sig_app',
            record_on='update',
        )

    u = Property(Instance(RTraceDomainListField), depends_on='+ps_levels')

    @cached_property
    def _get_u(self):
        return RTraceDomainListField(
            name='displacement',
            var='u',
            warp=True,
            record_on='update',
        )

    #[ self.sig_trace, self.eps_trace, self.eps0_trace, self.eps1t_trace, self.u_trace]#, self.f_w_diagram ]

    fe_grid = Property(Instance(FEGrid), depends_on='+ps_levels')

    def _get_fe_grid(self):
        return FEGrid(coord_min=(0.0, 0.0),
                      coord_max=(1.0, 1.0),
                      shape=(self.n_elems_xy, self.n_elems_xy),
                      fets_eval=self.fets)

    # time loop
    tloop = Property(depends_on='+ps_levels')

    @cached_property
    def _get_tloop(self):

        self.fets.vtk_r *= 0.95

        domain = self.fe_grid

        self.center_top_dof = domain[-1, -1, -1, -1].dofs

        # NOTE: additional line-loads at the edge of the roof need to be considered!

        #        upper_surface = domain[:, :, -1, :, :, -1]
        #        whole_domain = domain[:, :, :, :, :, :]

        bcond_list = [
            BCSlice(var='u', dims=[0, 1], slice=domain[0, 0, 0, 0], value=0),
            BCSlice(var='u', dims=[0], slice=domain[0, -1, 0, -1], value=0),
        ]

        #       w_z = domain[-1, -1, -1, -1].dofs[0]

        #       self.f_w_diagram = RTraceGraph( name = 'load - corner deflection',
        #                                           var_x = 'U_k', idx_x = w_z,
        #                                           var_y = 'time', idx_y = 0,
        #                                           record_on = 'update' )
        #        rtrace_list = self.rtrace_list#[ self.f_w_diagram ] + self.rtrace_list

        ts = TS(sdomain=[domain],
                dof_resultants=True,
                bcond_list=bcond_list,
                rtrace_list=self.rtrace_list)
        # Add the time-loop control
        tloop = TLoop(tstepper=ts, tolerance=1e-4, tline=self.tline)
        return tloop
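
peval() reduces the recorded stress field to scalar outputs (maximum principal stress, maximum sig_yy, maximum sig_xx). A numpy-only sketch of such a reduction for a hypothetical array of plane-stress states (this is not the RTraceDomainListField implementation):

import numpy as np

# columns: sig_xx, sig_yy, sig_xy at a few hypothetical integration points [MPa]
sig = np.array([[12., 3., 2.],
                [8., 5., -1.],
                [15., 2., 4.]])
sig_xx, sig_yy, sig_xy = sig.T
# closed-form principal stresses for plane stress
center = 0.5 * (sig_xx + sig_yy)
radius = np.sqrt((0.5 * (sig_xx - sig_yy)) ** 2 + sig_xy ** 2)
max_princ_stress = (center + radius).max()
print(max_princ_stress, sig_yy.max(), sig_xx.max())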
Example #7
class RandomVariable( HasTraits ):
    '''Class representing the definition and discretization of a random variable.
    '''
    trait_value = Float

    source_trait = CTrait

    # name of the parameter
    name = Str

    pd = Instance( IPDistrib )
    def _pd_changed( self ):
        self.pd.n_segments = self._n_int

    changed = Event
    @on_trait_change( 'pd.changed,+changed' )
    def _set_changed( self ):
        self.changed = True

    # Switch to make the parameter random
    random = Bool( False, changed = True )

    def set_random( self, distribution = 'uniform', discr_type = 'T grid',
                    loc = 0., scale = 1., shape = 1., n_int = 30 ):

        possible_distr = self.source_trait.distr
        if distribution and distribution not in possible_distr:
            raise AssertionError( 'distribution type %s not allowed for parameter %s'
                                  % ( distribution, self.name ) )

        self.pd = PDistrib( distr_choice = distribution, n_segments = n_int )
        self.pd.distr_type.set( scale = scale, shape = shape, loc = loc )

        self.n_int = n_int
        self.discr_type = discr_type
        self.random = True

    def unset_random( self ):
        self.random = False

    # Number of integration points (relevant only for grid based methods)
    _n_int = Int
    n_int = Property
    def _set_n_int( self, value ):
        if self.pd:
            self.pd.n_segments = value
        self._n_int = value
    def _get_n_int( self ):
        return self.pd.n_segments

    # type of the RandomVariable discretization 
    discr_type = Enum( 'T grid', 'P grid', 'MC',
                        changed = True )
    def _discr_type_default( self ):
        return 'T grid'

    theta_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_theta_arr( self ):
        '''Get the sampling points of the random variable for the
        chosen discr_type.
        '''
        if not self.random:
            return array( [ self.trait_value ], dtype = float )

        if self.discr_type == 'T grid':

            # Get the x grid from pdistrib and shift it
            # such that the midpoints of the segments are used for the
            # integration.

            x_array = self.pd.x_array
            # Note assumption of T grid discr_type
            theta_array = x_array[:-1] + self.pd.dx / 2.0

        elif self.discr_type == 'P grid':

            # P grid discretization generated from the inverse cumulative
            # probability
            #
            distr = self.pd.distr_type.distr
            # Grid of constant probabilities
            pi_arr = linspace( 0.5 / self.n_int, 1. - 0.5 / self.n_int, self.n_int )
            theta_array = distr.ppf( pi_arr )

        return theta_array

    dG_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_dG_arr( self ):

        if not self.random:
            return array( [ 1.0 ], dtype = float )

        if self.discr_type == 'T grid':

            d_theta = self.theta_arr[1] - self.theta_arr[0]
            return self.pd.get_pdf_array( self.theta_arr ) * d_theta

        elif self.discr_type == 'P grid':

            # P grid discretization generated from the inverse cumulative
            # probability
            #
            return array( [ 1.0 / float( self.n_int ) ], dtype = 'float_' )

    def get_rvs_theta_arr( self, n_samples ):
        if self.random:
            return self.pd.get_rvs_array( n_samples )
        else:
            return array( [self.trait_value], float )
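
theta_arr and dG_arr implement two quadrature rules for the random variable: the 'T grid' uses midpoints of an equidistant grid weighted by the pdf, the 'P grid' uses quantiles of constant probability content with uniform weights. A standalone sketch with scipy.stats in place of PDistrib (an assumed substitution; the normal distribution is chosen arbitrarily):

import numpy as np
from scipy.stats import norm

n_int = 10
distr = norm(loc=0.0, scale=1.0)

# 'T grid': equidistant segments, midpoints weighted by pdf * segment width
x_array = np.linspace(distr.ppf(0.001), distr.ppf(0.999), n_int + 1)
dx = x_array[1] - x_array[0]
theta_T = x_array[:-1] + dx / 2.0
dG_T = distr.pdf(theta_T) * dx

# 'P grid': equal-probability quantiles, uniform weights 1 / n_int
pi_arr = np.linspace(0.5 / n_int, 1. - 0.5 / n_int, n_int)
theta_P = distr.ppf(pi_arr)
dG_P = np.repeat(1.0 / n_int, n_int)

print(dG_T.sum(), dG_P.sum())   # both close to 1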
class MFnChacoEditorToolbar(HasPrivateTraits):
    """ Toolbar displayed in table editors.
    """
    #---------------------------------------------------------------------------
    #  Trait definitions:
    #---------------------------------------------------------------------------

    # Save the function values as data:
    save_data = Instance(
        Action, {
            'name': 'Save as data',
            'tooltip': 'Save the function values',
            'action': 'on_savedata',
            'enabled': True,
            'image': ImageResource('add')
        })

    # Save the plot as a figure:
    save_fig = Instance(
        Action, {
            'name': 'Save as fig',
            'tooltip': 'Save as figure',
            'action': 'on_savefig',
            'enabled': True,
            'image': ImageResource('save')
        })

    # The table editor that this is the toolbar for:
    editor = Instance(_MFnChacoEditor)

    # The toolbar control:
    control = Any

    #---------------------------------------------------------------------------
    #  Initializes the toolbar for a specified window:
    #---------------------------------------------------------------------------

    def __init__(self, parent=None, **traits):
        super(MFnChacoEditorToolbar, self).__init__(**traits)
        factory = self.editor.factory

        actions = [self.save_data, self.save_fig]
        toolbar = ToolBar(image_size=(16, 16),
                          show_tool_names=False,
                          show_divider=False,
                          *actions)
        self.control = toolbar.create_tool_bar(parent, self)
        self.control.SetBackgroundColour(parent.GetBackgroundColour())

        # fixme: Why do we have to explicitly set the size of the toolbar?
        #        Is there some method that needs to be called to do the
        #        layout?
        self.control.SetSize(wx.Size(23 * len(actions), 16))

    #---------------------------------------------------------------------------
    #  PyFace/Traits menu/toolbar controller interface:
    #---------------------------------------------------------------------------

    #---------------------------------------------------------------------------
    #  Adds a menu item to the menu bar being constructed:
    #---------------------------------------------------------------------------

    def add_to_menu(self, menu_item):
        """ Adds a menu item to the menu bar being constructed.
        """
        pass

    #---------------------------------------------------------------------------
    #  Adds a tool bar item to the tool bar being constructed:
    #---------------------------------------------------------------------------

    def add_to_toolbar(self, toolbar_item):
        """ Adds a toolbar item to the too bar being constructed.
        """
        pass

    #---------------------------------------------------------------------------
    #  Returns whether the menu action should be defined in the user interface:
    #---------------------------------------------------------------------------

    def can_add_to_menu(self, action):
        """ Returns whether the action should be defined in the user interface.
        """
        return True

    #---------------------------------------------------------------------------
    #  Returns whether the toolbar action should be defined in the user
    #  interface:
    #---------------------------------------------------------------------------

    def can_add_to_toolbar(self, action):
        """ Returns whether the toolbar action should be defined in the user
            interface.
        """
        return True

    #---------------------------------------------------------------------------
    #  Performs the action described by a specified Action object:
    #---------------------------------------------------------------------------

    def perform(self, action, action_event=None):
        """ Performs the action described by a specified Action object.
        """
        getattr(self.editor, action.action)()
class _MFnChacoEditor(Editor):
    """ Traits UI editor for displaying trait values in a MFnLine.
    """

    # TableEditorToolbar associated with the editor:
    toolbar = Any
    # The Traits UI associated with the function editor toolbar:
    #  toolbar_ui = Instance( UI )

    # adjustable parameters
    adapter = Instance(MFnPlotAdapter)

    splot = Instance(Plot)
    line_plot = Instance(LinePlot)

    plot_container = Instance(OverlayPlotContainer)

    def _plot_container_default(self):
        container = OverlayPlotContainer(padding=50,
                                         fill_padding=False,
                                         bgcolor=self.adapter.bgcolor,
                                         use_backbuffer=True)

        return container

    #---------------------------------------------------------------------------
    #  Finishes initializing the editor by creating the underlying toolkit
    #  widget:
    #---------------------------------------------------------------------------

    def init(self, parent):
        """ Finishes initializing the editor by creating the underlying toolkit
            widget.
        """
        factory = self.factory
        self.adapter = factory.adapter

        self.control = self._create_canvas(parent)

        # Register the update listener
        #
        self.value.on_trait_change(self.update_editor, 'data_changed')

    def update_editor(self):
        c = self.plot_container
        c.remove(*c.components)
        self._refresh_container()

    def _create_canvas(self, parent):
        '''Create canvas for chaco plots
        '''
        panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)

        container_panel = Window(panel, component=self.plot_container)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(wx.StaticLine(parent, -1, style=wx.LI_HORIZONTAL), 0,
                  wx.EXPAND | wx.BOTTOM, 5)
        sizer.Add(self._create_toolbar(panel), 0, wx.EXPAND)
        sizer.Add(container_panel.control, 1, wx.EXPAND)

        sizer.Add(wx.StaticLine(parent, -1, style=wx.LI_HORIZONTAL), 0,
                  wx.EXPAND | wx.BOTTOM, 5)

        panel.SetSizer(sizer)

        return panel

    def _refresh_container(self):
        ''' rebuild the container for the current data
        '''
        broadcaster = BroadcasterTool()

        mfn_line = self.value
        ydata = transpose(mfn_line.ydata)

        adapter = self.adapter
        if adapter.var_x != '':
            # Get the x-label text from the object's trait var_x
            label_x = getattr(self.object, adapter.var_x)
        else:
            # Get the x-label from the adapter
            label_x = adapter.label_x

        if adapter.var_y != '':
            label_y = getattr(self.object, adapter.var_y)
        else:
            label_y = adapter.label_y

        index = ArrayDataSource(mfn_line.xdata)
        index_range = DataRange1D()
        index_range.add(index)
        index_mapper = LinearMapper(range=index_range)

        value_range = DataRange1D(low_setting=0.0)

        colors = adapter.line_color  # self.line_color_chaco.values()
        styles = adapter.line_style  # self.line_style_chaco.values()

        s_item = list(styles.items())

        color_chaco = []
        style_chaco = []
        c_index = 0  # loop for colors
        s_index = 0  # loop for styles
        i = 0  # for colors and styles
        plots = {}  # for legend

        pd = ArrayPlotData(index=mfn_line.xdata)
        self.splot = Plot(pd)

        for vector in ydata[:]:

            if len(colors) == c_index:
                c_index = 0
            if len(styles) == s_index:
                s_index = 0

            style_name = s_item[s_index][0]
            color_chaco.append(colors[c_index])
            style_chaco.append(style_name)
            c_index = c_index + 1
            s_index = s_index + 1

            y = ArrayDataSource(vector, sort_order="none")

            value_range.add(y)
            value_mapper = LinearMapper(range=value_range)

            self.line_plot = LinePlot(index=index,
                                      value=y,
                                      index_mapper=index_mapper,
                                      value_mapper=value_mapper,
                                      color=color_chaco[i],
                                      line_width=adapter.linewidth,
                                      edge_color='blue',
                                      border_visible=False,
                                      line_style=style_chaco[i])

            add_default_grids(self.line_plot)
            add_default_axes(self.line_plot, vtitle=label_y, htitle=label_x)

            self.plot_container.add(self.line_plot)
            #            pan = PanTool(line_plot)
            #            zoom = SimpleZoom(line_plot, tool_mode="box", always_on=False)
            #            broadcaster.tools.append(pan)
            #            broadcaster.tools.append(zoom)

            # Add the traits inspector tool to the container
            #
            #           self.plot_container.tools.append(TraitsTool( self.plot_container ))

            self.line_plot.tools.append(PanTool(self.line_plot))
            self.line_plot.overlays.append(ZoomTool(self.line_plot))

            # Legend
            lgnd = adapter.legend_labels[i]
            plots[lgnd] = self.line_plot

            # advance to the next color / style index
            i = i + 1

        legend = Legend(component=self.plot_container, padding=10, align="ul")
        legend.tools.append(LegendTool(legend, drag_button="right"))
        self.plot_container.overlays.append(legend)

        # Set the list of plots on the legend
        legend.plots = plots

        # Add the title at the top
        self.plot_container.overlays.append(
            PlotLabel(adapter.title,
                      component=self.plot_container,
                      font="swiss 16",
                      overlay_position="top"))

    #---------------------------------------------------------------------------
    #  Creates the table editing tool bar:
    #---------------------------------------------------------------------------

    def _create_toolbar(self, parent):
        """ Creates the table editing toolbar.
        """
        factory = self.factory

        panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
        toolbar = MFnChacoEditorToolbar(parent=parent, editor=self)
        tb_sizer = wx.BoxSizer(wx.HORIZONTAL)
        panel.SetSizer(tb_sizer)

        self.toolbar = toolbar
        tb_sizer.Add(toolbar.control, 0)
        tb_sizer.Add((1, 1), 1, wx.EXPAND)

        return panel

    #---------------------------------------------------------------------------
    #  Handles the user requesting that the function data be saved:
    #---------------------------------------------------------------------------
    def on_savedata(self):
        """ Handles the user requesting that the data of the function is to be saved.
        """
        import os
        dlg = FileDialog(parent=self.control,
                         title='Export function data',
                         default_directory=os.getcwd(),
                         default_filename="",
                         wildcard='*.csv',
                         action='save as')
        if dlg.open() == OK:
            path = dlg.path

            print "Saving data to", path, "..."
            try:
                vectors = []
                x_values = self.value.xdata
                y_values = self.value.ydata
                #savetxt( path, vstack( (x_values, y_values[:,0], y_values[:,1], y_values[:,2]) ).transpose() )

                print('y_values', y_values)
                y_values_tr = y_values.transpose()
                for vector in y_values_tr[:]:
                    vectors.append(vector)

                savetxt(path, vstack((x_values, vectors)).transpose())

            except Exception:
                print("Error saving!")
                raise
            print("Data saved.")
        return

    def on_savefig(self):
        """ Handles the user requesting that the image of the function is to be saved.
        """
        import os
        dlg = FileDialog(parent=self.control,
                         title='Save as image',
                         default_directory=os.getcwd(),
                         default_filename="",
                         wildcard=WILDCARD,
                         action='save as')
        if dlg.open() == OK:
            path = dlg.path

            print "Saving plot to", path, "..."
            try:
                # Now we create a canvas of the appropriate size and ask it to render
                # our component.  (If we wanted to display this plot in a window, we
                # would not need to create the graphics context ourselves; it would be
                # created for us by the window.)
                size = (650, 400)
                gc = GraphicsContext(size)
                self.plot_container.draw(gc)
                gc.save(path)
            except Exception:
                print("Error saving!")
                raise
            print("Plot saved.")
        return
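
on_savedata() exports the plotted function by stacking the x-values with the transposed y-columns and saving the transpose, so each curve ends up as one column of the output file. A numpy-only sketch of that stacking with invented data (the file name is hypothetical):

import numpy as np

x_values = np.linspace(0., 1., 5)
y_values = np.column_stack((x_values ** 2, x_values ** 3))   # two hypothetical curves

vectors = [vector for vector in y_values.transpose()]
np.savetxt('mfn_data_sketch.csv', np.vstack((x_values, vectors)).transpose())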
Example #10
class SimBS(IBVModel):
    '''Plate test prepared for parametric study.
    '''

    input_change = Event

    @on_trait_change('+input,ccs_unit_cell.input_change')
    def _set_input_change(self):
        self.input_change = True

    implements(ISimModel)

    #-----------------
    # discretization:
    #-----------------
    #
    # discretization in x,y-direction:
    shape_x = Int(10, input=True, ps_levels=(8, 12, 3))

    shape_y = Int(5, input=True, ps_levels=(8, 12, 3))

    # discretization in y-direction (length - direction):
    #
    #    shape_L1 = Int(10, input = True,
    #                      ps_levels = (8, 12, 3))
    #    shape_L2 = Int(10, input = True,
    #                      ps_levels = (8, 12, 3))
    #
    #    shape_l = Property(Float, depends_on = 'shape_L1, shape_L2')
    #    @cached_property
    #    def _get_shape_l(self):
    #        return self.shape_L1 + self.shape_L2

    shape_l = Int(20, input=True)

    # discretization in arc-direction
    # (use equidistant spacing along the arc):
    #
    shape_s = Int(10, input=True, ps_levels=(8, 12, 3))

    # discretization in z-direction (thickness direction):
    shape_z = Int(4, input=True, ps_levels=(2, 3, 3))

    #-----------------
    # geometry:
    #-----------------
    #

    # length_x
    #
    length = Float(2.20, input=True)

    # length_y
    # (= half arc width due to symmetry)
    width = Float(1.07, input=True)

    # length_z
    #
    arc_height = Float(0.50, input=True)

    thickness = Float(0.02, input=True)

    elem_length_x = Property(Float, depends_on='shape_x, shape_y, length')

    @cached_property
    def _get_elem_length_x(self):
        return self.length / self.shape_x

    #-----------------
    # phi function extended:
    #-----------------
    #
    phi_fn = Instance(IPhiFn, input=True)

    def _phi_fn_default(self):
        return PhiFnStrainHardening()

    #----------------------------------------------------------------------------------
    # mats_eval
    #----------------------------------------------------------------------------------

    # age of the plate at the time of testing
    # NOTE: the same phi-function is used independently of the age. This assumes
    # an affine/proportional damage evolution for different ages.
    #
    age = Int(28, auto_set=False, enter_set=True, input=True)

    # composite E-modulus
    # 28 GPa = 28000 [MPa]
    E_c = Float(28e3, auto_set=False, enter_set=True, input=True)

    # support (steel) E-modulus
    # 210 GPa = 210000 [MPa]
    E_s = Float(210e3, auto_set=False, enter_set=True, input=True)

    # Poisson's ratio
    #
    nu = Float(0.2, auto_set=False, enter_set=True, input=True)

    tstep = Float(0.05, auto_set=False, enter_set=True, input=True)

    # @todo: for mats_eval the information of the unit cell should be used
    # in order to use the same number of microplanes and model version etc...
    #
    mats_eval = Property(Instance(MATS2D5MicroplaneDamage),
                         depends_on='input_change')

    @cached_property
    def _get_mats_eval(self):
        mats_eval = MATS2D5MicroplaneDamage(E=self.E_c,
                                            nu=self.nu,
                                            n_mp=30,
                                            symmetrization='sum-type',
                                            model_version='compliance',
                                            phi_fn=self.phi_fn)

        return mats_eval

    elstmr_mats = Property(Instance(MATS3DElastic), depends_on='input_change')

    @cached_property
    def _get_elstmr_mats(self):

        max_eps = self.thickness * 1.1
        max_f = 0.020  # MN
        max_eps = 1.0  # [-]
        area = self.elem_length_x**2
        sig = max_f / area
        E_elast = sig / max_eps
        print 'effective elastomer E_modulus', E_elast
        return MATS3DElastic(E=E_elast, nu=0.2)
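    # Added worked numbers (illustration only, using the defaults above): with
    # length = 2.20 m and shape_x = 10, elem_length_x = 0.22 m, hence
    # area = 0.22 ** 2 = 0.0484 m^2, sig = 0.020 / 0.0484 ~ 0.413 MN/m^2 and,
    # since max_eps is reset to 1.0, E_elast ~ 0.413 MN/m^2.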

    supprt_mats = Property(Instance(MATS3DElastic), depends_on='input_change')

    @cached_property
    def _get_supprt_mats(self):
        return MATS3DElastic(E=self.E_s, nu=0.2)

    #-----------------
    # fets:
    #-----------------

    # use quadratic serendipity elements
    #
    specmn_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_specmn_fets(self):
        return FETS2D58H20U(mats_eval=self.mats_eval)

    # use quadratic serendipity elements
    #
    elstmr_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_elstmr_fets(self):
        return FETS2D58H20U(mats_eval=self.elstmr_mats)

    # use quadratic serendipity elements
    #
    supprt_fets = Property(Instance(FETSEval), depends_on='input_change')

    @cached_property
    def _get_supprt_fets(self):
        return FETS2D58H20U(mats_eval=self.supprt_mats)

    def peval(self):
        '''
        Evaluate the model and return the array of results specified
        in the method get_sim_outputs.
        '''
        U = self.tloop.eval()

        self.f_w_diagram_center.refresh()
        F_max = max(self.f_w_diagram_center.trace.ydata)

        u_center_top_z = U[self.center_top_dofs][0, 0, 2]
        return array([u_center_top_z, F_max], dtype='float_')

    fe_domain = Property(depends_on='+ps_levels, +input')

    @cached_property
    def _get_fe_domain(self):
        return FEDomain()

    specmn_fe_level = Property(depends_on='+ps_levels, +input')

    def _get_specmn_fe_level(self):
        return FERefinementGrid(name='specimen patch',
                                fets_eval=self.specmn_fets,
                                domain=self.fe_domain)

    barel_shell_geo = Property(Instance(BarelShellGeo), depends_on='+input')

    @cached_property
    def _get_barel_shell_geo(self):
        return BarelShellGeo(length_quarter=2.2,
                             width_quarter=1.07,
                             shape_l=self.shape_x,
                             shape_s=self.shape_y,
                             shape_z=self.shape_z,
                             arc_height=0.5,
                             thickness=0.02,
                             Lr=0.5,
                             L1=1.43)

    specmn_fe_grid = Property(Instance(FEGrid),
                              depends_on='+ps_levels, +input')

    @cached_property
    def _get_specmn_fe_grid(self):
        fe_grid = FEGrid(coord_max=(1, 1, 1),
                         shape=(self.shape_x, self.shape_y, self.shape_z),
                         level=self.specmn_fe_level,
                         geo_transform=self.barel_shell_geo,
                         fets_eval=self.specmn_fets)

        return fe_grid


#    elstmr_fe_level = Property(depends_on = '+ps_levels, +input')
#    def _get_elstmr_fe_level(self):
#        return  FERefinementGrid(name = 'elastomer patch',
#                                 fets_eval = self.elstmr_fets,
#                                 domain = self.fe_domain)
#
#    elstmr_fe_grid = Property(Instance(FEGrid), depends_on = '+ps_levels, +input')
#    @cached_property
#    def _get_elstmr_fe_grid(self):
#        # coordinates adapt to discretization
#        elstmr_min = self.length / 2.0 - self.elem_length_x
#        elstmr_max = self.length / 2.0
#        z_max = self.thickness * 1.1
#        return FEGrid(coord_min = (0, 0, 0),
#                      coord_max = (1, 1, 1),
#                      level = self.elstmr_fe_level,
#                      shape = (1, 1, 1),
#                      fets_eval = self.elstmr_fets)
#
#    supprt_fe_level = Property(depends_on = '+ps_levels, +input')
#    def _get_supprt_fe_level(self):
#        return  FERefinementGrid(name = 'support patch',
#                                 fets_eval = self.supprt_fets,
#                                 domain = self.fe_domain)
#
#    supprt_fe_grid = Property(Instance(FEGrid), depends_on = '+ps_levels, +input')
#    @cached_property
#    def _get_supprt_fe_grid(self):
#        # coordinates adapt to discretization
#        supprt_min = 0
#        supprt_max = self.elem_length_x
#        z_max = self.thickness * 0.1
#
#        def tappered_support(pts):
#            x_, y_, z_ = pts.T
#            dx = np.max(x_) - np.min(y_)
#            dy = np.max(y_) - np.min(y_)
#            dz = np.max(z_) - np.min(z_)
#
#            dz2 = dz / 2 / 2 / 2
#            dz4 = dz / 6 / 2 / 2
#
#            dz_red = (dz2 * x_ / dx + dz2 * y_ / dy - dz4 * x_ / dx * y_ / dy)
#
#            pts = c_[x_, y_, z_ + dz_red]
#            return pts
#
#        return FEGrid(coord_min = (0, 0, 0),
#                      coord_max = (1, 1, 1),
#                      level = self.supprt_fe_level,
#                      shape = (1, 1, 1),
#                      #geo_transform = tappered_support,
#                      fets_eval = self.supprt_fets)

    tloop = Property(depends_on='input_change')

    @cached_property
    def _get_tloop(self):

        #fets_eval.mats_eval.nu = self.nu
        specmn = self.specmn_fe_grid

        if False:
            elstmr = self.elstmr_fe_grid
            supprt = self.supprt_fe_grid

        self.fe_domain.n_dofs

        self.center_top_dofs = specmn[0, 0, -1, 0, 0, -1].dofs
        center_bottom_dofs = specmn[0, 0, 0, 0, 0, 0].dofs

        support_elem = self.shape_y - 4

        #--------------------------------------------------------------
        # boundary conditions for the symmetry and the single support
        #--------------------------------------------------------------
        bc_symplane_yz = BCSlice(var='u',
                                 value=0.,
                                 dims=[0],
                                 slice=specmn[0, :, :, 0, :, :])
        bc_symplane_xz = BCSlice(var='u',
                                 value=0.,
                                 dims=[1],
                                 slice=specmn[:, 0, :, :, 0, :])

        #--------------------------------------------------------------
        # boundary conditions for the symmetry and the single support
        #--------------------------------------------------------------
        support_slice = specmn[-1, support_elem, :, -1, 0, :]
        support_000 = BCSlice(var='u',
                              value=0.,
                              dims=[0, 2],
                              slice=support_slice)

        #--------------------------------------------------------------
        # loading
        #--------------------------------------------------------------
        # w_max = center displacement:
        w_max = -0.07  # [m]

        time_function = MFnLineArray(xdata=[0.0, 0.2, 0.4, 1.0],
                                     ydata=[0.0, 0.2, 0.75, 1.0])

        bc_el_w = BCSlice(
            var='u',
            value=w_max,
            dims=[2],  #time_function = time_function.get_value,
            slice=specmn[0, 0, 0, 0, 0, 0])

        p_max = -0.0042 / (0.2 * 0.2)
        p_slice = specmn[:, 0, -1, :, :, -1]
        bc_el_w = BCSlice(
            var='f',
            value=p_max,
            dims=[2],
            integ_domain='local',  #time_function = time_function.get_value,
            slice=p_slice)

        #--------------------------------------------------------------
        # ts
        #--------------------------------------------------------------
        center_dof = center_bottom_dofs[0, 0, 2]
        # center_top_line_dofs
        #
        #ctl_dofs = elstmr[:, :, -1, :, :, -1].dofs[:, :, 2].flatten()

        # force-displacement-diagram
        #
        self.f_w_diagram_center = RTraceGraph(
            name='displacement (center) - reaction 2',
            var_x='U_k',
            idx_x=center_dof,
            # elastomer load
            var_y='F_int',
            idx_y_arr=support_slice.dofs[:, :, 2].flatten(),
            record_on='update',
            transform_x='-x * 1000',  # '%g * x' % (fabs(w_max),),
            # due to symmetry the total force sums up from four parts of the beam (2 symmetry axes):
            #
            transform_y='-2 * 1000. * y')

        bcond_list = [
            bc_symplane_yz,
            bc_symplane_xz,
            support_000,
            # var 1:
            bc_el_w,
            #                               bc_center_w,
            #                               # var 2:
            #                               bc_center_w_elem,
            #                               # var 3:
            #                               bc_center_w_xline, bc_center_w_yline
        ]
        rtrace_list = [
            self.f_w_diagram_center,
            RTraceDomainListField(name='Displacement',
                                  var='u',
                                  idx=0,
                                  warp=True),

            #                             RTraceDomainListField(name = 'Stress' ,
            #                                            var = 'sig_app', idx = 0, warp = True,
            #                                            record_on = 'update'),
            #                             RTraceDomainListField(name = 'Strain' ,
            #                                        var = 'eps_app', idx = 0, warp = True,
            #                                        record_on = 'update'),
            RTraceDomainListField(name='Damage',
                                  var='omega_mtx',
                                  idx=0,
                                  warp=True,
                                  record_on='update'),
            RTraceDomainListField(
                name='max principle stress',
                idx=0,
                var='max_principle_sig',
                warp=True,
                #                                      position = 'int_pnts',
                record_on='update',
            )
            #                             RTraceDomainListField(name = 'IStress' ,
            #                                            position = 'int_pnts',
            #                                            var = 'sig_app', idx = 0,
            #                                            record_on = 'update'),
            #                             RTraceDomainListField(name = 'IStrain' ,
            #                                            position = 'int_pnts',
            #                                            var = 'eps_app', idx = 0,
            #                                            record_on = 'update'),
        ]

        ts = TS(sdomain=self.fe_domain,
                bcond_list=bcond_list,
                rtrace_list=rtrace_list)

        print 'tstep', self.tstep
        # Add the time-loop control
        tloop = TLoop(
            tstepper=ts,

            #                       # allow only a low tolerance
            #                       #
            #                       KMAX = 50,
            #                       tolerance = 5e-4,

            # allow a high tolerance
            #
            KMAX=100,
            tolerance=0.001,

            #                       # allow a very high tolerance
            #                       #
            #                       KMAX = 50,
            #                       tolerance = 0.01,
            RESETMAX=0,
            debug=False,
            tline=TLine(min=0.0, step=self.tstep, max=1))

        return tloop

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='u_center_top_z', unit='m'),
            SimOut(name='F_max', unit='kN')
        ]
Example #11
class LCCTable(HasTraits):
    '''Loading Case Manager.
    Generates and sorts the loading case combinations
    of all specified loading cases.
    '''

    # define ls
    #
    ls = Trait('ULS', {'ULS': ULS, 'SLS': SLS})

    # lcc-instance for the view
    #
    lcc = Instance(LCC)

    #-------------------------------
    # Define loading cases:
    #-------------------------------

    # path to the directory containing the state data files
    #
    data_dir = Directory

    # list of load cases
    #
    lc_list_ = List(Instance(LC))

    lc_list = Property(List, depends_on='+filter')

    def _set_lc_list(self, value):
        self.lc_list_ = value

    def _get_lc_list(self):
        #        for lc in self.lc_list_:
        #            if lc.data_filter != self.data_filter:
        #                lc.data_filter = self.data_filter
        return self.lc_list_

    lcc_table_columns = Property(depends_on='lc_list_, +filter')

    def _get_lcc_table_columns(self):
        return [ ObjectColumn(label='Id', name='lcc_id') ] + \
               [ ObjectColumn(label=lc.name, name=lc.name)
                for idx, lc in enumerate(self.lc_list) ] + \
                [ ObjectColumn(label='assess_value', name='assess_value') ]

    geo_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_geo_columns(self):
        '''derive the order of the geo columns
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        '_check_for_consistency' method.
        '''
        return self.lc_list[0].geo_columns

    sr_columns = Property(List(Str), depends_on='lc_list_, +filter')

    def _get_sr_columns(self):
        '''derive the order of the stress resultants
        from the first element in 'lc_list'. The internal
        consistency is checked separately in the
        '_check_for_consistency' method.
        '''
        return self.lc_list[0].sr_columns

    #-------------------------------
    # check consistency
    #-------------------------------

    def _check_for_consistency(self):
        ''' check input files for consistency:
        '''
        return True

    #-------------------------------
    # lc_arr
    #-------------------------------

    lc_arr = Property(Array)

    def _get_lc_arr(self):
        '''stack stress resultants arrays of all loading cases together.
        This yields an array of shape ( n_lc, n_elems, n_sr )
        '''
        sr_arr_list = [lc.sr_arr for lc in self.lc_list]
        #        for x in sr_arr_list:
        #            print x.shape

        return array(sr_arr_list)

    #-------------------------------
    # Array dimensions:
    #-------------------------------

    n_sr = Property(Int)

    def _get_n_sr(self):
        return len(self.sr_columns)

    n_lc = Property(Int)

    def _get_n_lc(self):
        return len(self.lc_list)

    n_lcc = Property(Int)

    def _get_n_lcc(self):
        return self.combi_arr.shape[0]

    n_elems = Property(Int)

    def _get_n_elems(self):
        return self.lc_list[0].sr_arr.shape[0]

    #-------------------------------
    # auxiliary method for get_combi_arr
    #-------------------------------

    def _product(self, args):
        """
        Get all possible permutations of the security factors
        without changing the order of the loading cases.
        The method corresponds to the built-in function 'itertools.product'.
        Instead of returning a generator object, a list of all
        possible permutations is returned. The argument must be a list of
        lists; the original 'itertools.product' takes the pools as
        separate arguments ("*args").
        """
        pools = map(tuple,
                    args)  # within original version args defined as *args
        result = [[]]
        for pool in pools:
            result = [x + [y] for x in result for y in pool]
        return result
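    # Added check (illustration only; 'pools' is a made-up argument): the
    # result agrees with the built-in 'itertools.product' up to list/tuple type:
    #
    #     >>> import itertools
    #     >>> pools = [[1.0, 1.35], [0.0, 1.5]]
    #     >>> [list(t) for t in itertools.product(*pools)]
    #     [[1.0, 0.0], [1.0, 1.5], [1.35, 0.0], [1.35, 1.5]]
    #
    # which is exactly what self._product(pools) returns.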

    # ------------------------------------------------------------
    # 'combi_arr' - array containing indices of all loading case combinations:
    # ------------------------------------------------------------

    # list of indices of the position of the imposed loads in 'lc_list'
    #
#    imposed_idx_list = Property( List, depends_on = 'lc_list_, lc_list_.+input' )
    imposed_idx_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_imposed_idx_list(self):
        '''list of indices for the imposed loads
        '''
        imposed_idx_list = []
        for i_lc, lc in enumerate(self.lc_list):
            cat = lc.category
            if cat == 'imposed-load':
                imposed_idx_list.append(i_lc)
        return imposed_idx_list

    # array containing the psi with name 'psi_key' for the specified
    # loading cases defined in 'lc_list'. For dead-loads no value for
    # psi exists. In this case a value of 1.0 is defined.
    # This yields an array of shape ( n_lc, )
    #
    def _get_psi_arr(self, psi_key):
        '''psi_key must be defined as:
        'psi_0', 'psi_1', or 'psi_2'
        Returns a 1d-array of shape ( n_lc, )
        '''
        # get list of ones (used for dead-loads):
        #
        psi_list = [1] * len(self.lc_list)

        # overwrite ones with psi-values in case of imposed-loads:
        #
        for imposed_idx in self.imposed_idx_list:
            psi_value = getattr(self.lc_list[imposed_idx], psi_key)
            psi_list[imposed_idx] = psi_value

        return array(psi_list, dtype='float_')
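    # Added example (illustration only): with three loading cases where only
    # the second one is an imposed load with psi_0 = 0.7, the call
    # self._get_psi_arr('psi_0') returns array([ 1. ,  0.7,  1. ]) --
    # dead loads keep the neutral factor 1.0.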

    # list containing names of the loading cases
    #
    lc_name_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lc_name_list(self):
        '''list of names of all loading cases
        '''
        return [lc.name for lc in self.lc_list]

    show_lc_characteristic = Bool(True)

    # combination array:
    #
    combi_arr = Property(Array, depends_on='lc_list_, combination_SLS')

    @cached_property
    def _get_combi_arr(self):
        '''array containing the security and combination factors
        corresponding to the specified loading cases.
        This yields an array of shape ( n_lcc, n_lc )

        Properties defined in the subclasses 'LCCTableULS', 'LCCTableSLS':
        - 'gamma_list' = list of security factors (gamma)
        - 'psi_lead' = combination factors (psi) of the leading imposed load
        - 'psi_non_lead' = combination factors (psi) of the non-leading imposed loads
        '''
        # printouts:
        #
        if self.ls == 'ULS':
            print '*** load case combinations for limit state ULS ***'
        else:
            print '*** load case combinations for limit state SLS ***'
            print '*** SLS combination used: % s ***' % (self.combination_SLS)

        #---------------------------------------------------------------
        # get permutations of safety factors ('gamma')
        #---------------------------------------------------------------
        #
        permutation_list = self._product(self.gamma_list)

        combi_arr = array(permutation_list)

        # check if imposed loads are defined
        # if not, no further processing of 'combi_arr' is necessary:
        #
        if self.imposed_idx_list == []:

            # if option is set to 'True' the loading case combination table
            # is enlarged with an identity matrix in order to see the
            # characteristic values of each loading case.
            #
            if self.show_lc_characteristic:
                combi_arr = vstack([identity(self.n_lc), combi_arr])

            return combi_arr

        #---------------------------------------------------------------
        # get leading and non-leading combination factors ('psi')
        #---------------------------------------------------------------
        # go through all possible cases of leading imposed loads.
        # For the currently investigated imposed loading case the
        # psi value is taken from 'psi_lead_arr'; for all other
        # imposed loads the psi value is taken from 'psi_non_lead_arr'.

        # Properties are defined in the subclasses
        #
        psi_lead_arr = self.psi_lead_arr
        psi_non_lead_arr = self.psi_non_lead_arr

        # for SLS limit state case 'rare' all imposed loads are multiplied
        # with 'psi_2'. In this case no distinction between leading or
        # non-leading imposed loads needs to be performed.
        #
        if all(psi_lead_arr == psi_non_lead_arr):
            combi_arr_psi = combi_arr * psi_lead_arr

        # generate a list of arrays obtained by multiplication
        # with the psi-factors.
        # This yields a list of length = number of imposed-loads.
        #
        else:
            combi_arr_psi_list = []
            for imposed_idx in self.imposed_idx_list:
                # copy in order to preserve initial state of the array
                # and avoid in place modification
                psi_arr = copy(psi_non_lead_arr)
                psi_arr[imposed_idx] = psi_lead_arr[imposed_idx]
                combi_arr_lead_i = combi_arr[where(
                    combi_arr[:, imposed_idx] != 0)] * psi_arr
                combi_arr_psi_list.append(combi_arr_lead_i)

            combi_arr_psi_no_0 = vstack(combi_arr_psi_list)

            # the missing cases without any imposed load have to be added:
            # get the combinations where all(!) imposed loads are 0
            #
            lcc_all_imposed_zero = where(
                (combi_arr[:, self.imposed_idx_list] == 0).all(axis=1))

            # add to combinations
            #
            combi_arr_psi = vstack(
                (combi_arr[lcc_all_imposed_zero], combi_arr_psi_no_0))

        #---------------------------------------------------------------
        # get exclusive loading cases ('exclusive_to')
        #---------------------------------------------------------------

        # get a list of lists containing the indices of the loading cases
        # that are defined exclusive to each other.
        # The list still contains duplicates, e.g. [1,2] and [2,1]
        #
        exclusive_list = []
        for i_lc, lc in enumerate(self.lc_list):

            # get related load case number
            #
            for exclusive_name in lc.exclusive_to:
                if exclusive_name in self.lc_name_list:
                    exclusive_idx = self.lc_name_list.index(exclusive_name)
                    exclusive_list.append([i_lc, exclusive_idx])

        # eliminate the duplicates in 'exclusive_list'
        #
        exclusive_list_unique = []
        for exclusive_list_entry in exclusive_list:
            if sorted(exclusive_list_entry) not in exclusive_list_unique:
                exclusive_list_unique.append(sorted(exclusive_list_entry))

        # delete the rows in combination array that contain
        # loading case combinations with imposed-loads that have been defined
        # as exclusive to each other.
        #
        combi_arr_psi_exclusive = combi_arr_psi
        #        print 'combi_arr_psi_exclusive', combi_arr_psi_exclusive
        for exclusive_list_entry in exclusive_list_unique:
            # check where at most one value of the exclusive load cases is unequal to zero
            #              LC1  LC2  LC3  (all LCs are defined as exclusive to each other)
            #
            # e.g.         1.5  0.9  0.8  (example of 'combi_arr_psi')
            #              1.5  0.0  0.0
            #              0.0  0.0  0.0  (combination with all imposed loads = 0 after multiplication with psi and gamma)
            #              ...  ...  ...
            #
            # this yields the following mask_arr (containing ones or zeros):
            # e.g.         1.0  1.0  1.0  --> sum = 3 --> more than one exclusive LC active --> combination rejected
            #              1.0  0.0  0.0  --> sum = 1 --> accepted combination
            # e.g.         0.0  0.0  0.0  --> sum = 0 --> accepted combination (only body-loads)
            #              ...  ...  ...
            #
            mask_arr = where(
                combi_arr_psi_exclusive[:, exclusive_list_entry] != 0, 1.0,
                0.0)
            #            print 'mask_arr', mask_arr
            true_combi = where(sum(mask_arr, axis=1) <= 1.0)
            #            print 'true_combi', true_combi
            combi_arr_psi_exclusive = combi_arr_psi_exclusive[true_combi]

        #---------------------------------------------------------------
        # create array with only unique load case combinations
        #---------------------------------------------------------------
        # If the psi values of an imposed-load are defined as zero this
        # may lead to zero entries in 'combi_arr'. This would yield rows
        # in 'combi_arr' which are duplicates. Those rows are removed.

        # Add first row in 'combi_arr_psi_exclusive' to '_unique' array
        # This array must have shape (1, n_lc) in order to use 'axis'-option
        #
        combi_arr_psi_exclusive_unique = combi_arr_psi_exclusive[0][None, :]

        for row in combi_arr_psi_exclusive:
            # Check whether the row equals any row already stored in the
            # 'unique' array. If it does not, the combination is added to 'unique'.
            # Broadcasting is used for the bool evaluation:
            #
            if not (row == combi_arr_psi_exclusive_unique).all(axis=1).any():
                combi_arr_psi_exclusive_unique = vstack(
                    (combi_arr_psi_exclusive_unique, row))

        # if option is set to 'True' the loading case combination table
        # is enlarged with an identity matrix in order to see the
        # characteristic values of each loading case.
        #
#        if self.show_lc_characteristic:
#            combi_arr_psi_exclusive_unique = vstack( [ identity( self.n_lc ), combi_arr_psi_exclusive_unique ] )

        return combi_arr_psi_exclusive_unique
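    # Added numeric sketch of the exclusive filter above (illustration only).
    # Suppose the first two loading cases are exclusive to each other
    # (exclusive_list_entry = [0, 1]):
    #
    #     >>> from numpy import array, where, sum
    #     >>> combi = array([[1.5, 0.9, 0.8],
    #     ...                [1.5, 0.0, 0.8],
    #     ...                [0.0, 0.0, 0.0]])
    #     >>> mask = where(combi[:, [0, 1]] != 0, 1.0, 0.0)
    #     >>> combi[where(sum(mask, axis=1) <= 1.0)]
    #     array([[ 1.5,  0. ,  0.8],
    #            [ 0. ,  0. ,  0. ]])
    #
    # i.e. the row that activates both exclusive loading cases is dropped.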

    #-------------------------------
    # lcc_arr
    #-------------------------------

    lcc_arr = Property(Array, depends_on='lc_list_')

    @cached_property
    def _get_lcc_arr(self):
        '''Array of all loading case combinations following the
        loading cases defined in 'lc_list' and the combinations
        defined in 'combi_arr'.
        This yields an array of shape ( n_lcc, n_elems, n_sr )
        '''
        self._check_for_consistency()

        combi_arr = self.combi_arr

        # 'combi_arr' is of shape ( n_lcc, n_lc )
        # 'lc_arr' is of shape ( n_lc, n_elems, n_sr )
        #
        lc_arr = self.lc_arr

        # Broadcasting is used to generate an array containing the multiplied lc's
        # yielding an array of shape ( n_lcc, n_lc, n_elems, n_sr )
        #

        lc_combi_arr = lc_arr[None, :, :, :] * combi_arr[:, :, None, None]

        # Then the sum over index 'n_lc' is evaluated yielding
        # an array of all loading case combinations.
        # This yields an array of shape ( n_lcc, n_elem, n_sr )
        #
        lcc_arr = sum(lc_combi_arr, axis=1)

        return lcc_arr
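    # Added shape check of the broadcasting used above (illustration only):
    #
    #     >>> import numpy as np
    #     >>> lc_arr = np.ones((2, 5, 3))      # ( n_lc, n_elems, n_sr )
    #     >>> combi_arr = np.ones((4, 2))      # ( n_lcc, n_lc )
    #     >>> prod = lc_arr[None, :, :, :] * combi_arr[:, :, None, None]
    #     >>> prod.shape
    #     (4, 2, 5, 3)
    #     >>> np.sum(prod, axis=1).shape
    #     (4, 5, 3)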

    #-------------------------------
    # lcc_lists
    #-------------------------------

    lcc_list = Property(List, depends_on='lc_list_')

    @cached_property
    def _get_lcc_list(self):
        '''list of loading case combinations (instances of LCC)
        '''
        combi_arr = self.combi_arr
        lcc_arr = self.lcc_arr
        sr_columns = self.sr_columns
        geo_columns = self.geo_columns

        n_lcc = self.n_lcc

        # return a dictionary of the stress resultants
        # this is used by LSTable to determine the stress
        # resultants of the current limit state
        #
        lcc_list = []
        for i_lcc in range(n_lcc):

            state_data_dict = {}
            for i_sr, name in enumerate(sr_columns):
                state_data_dict[name] = lcc_arr[i_lcc, :, i_sr][:, None]

            geo_data_dict = self.geo_data_dict

            lcc = LCC(  # lcc_table = self,
                factors=combi_arr[i_lcc, :],
                lcc_id=i_lcc,
                ls_table=LSTable(geo_data=geo_data_dict,
                                 state_data=state_data_dict,
                                 ls=self.ls))

            for idx, lc in enumerate(self.lc_list):
                lcc.add_trait(lc.name, Int(combi_arr[i_lcc, idx]))

            lcc_list.append(lcc)

        return lcc_list

    #-------------------------------
    # geo_arr
    #-------------------------------

    geo_data_dict = Property(Dict, depends_on='lc_list_')

    @cached_property
    def _get_geo_data_dict(self):
        '''Array of global coords derived from the first loading case defined in lc_list.
        Coords are identical for all LC's.
        '''
        return self.lc_list[0].geo_data_dict

    #-------------------------------
    # min/max-values
    #-------------------------------

    def get_min_max_state_data(self):
        ''' get the surrounding curve of all 'lcc' values
        '''
        lcc_arr = self.lcc_arr
        min_arr = ndmin(lcc_arr, axis=0)
        max_arr = ndmax(lcc_arr, axis=0)
        return min_arr, max_arr

    #--------------------------------------
    # used for the case 'max N*' according to ZiE
    # case 'maximum normal force' according to ZiE
    #--------------------------------------
#    max_sr_grouped_dict = Property( Dict )
#    @cached_property
#    def _get_max_sr_grouped_dict( self ):
#        ''' get the surrounding curve for each stress resultant
#            shape lcc_array ( n_lcc, n_elems, n_sr )
#        '''
#        sr_columns = self.sr_columns
#        lcc_arr = self.lcc_arr
#        dict = {}
#        for i, sr in enumerate( self.sr_columns ):
#            idx_1 = argmax( abs( lcc_arr[:, :, i] ), axis = 0 )
#            idx_2 = arange( 0, idx_1.shape[0], 1 )
#            dict[sr] = lcc_arr[idx_1, idx_2, :]
#        return dict

#--------------------------------------
# used for the case 'max eta' according to ZiE
# case 'maximum utilization ratio' according to ZiE
#--------------------------------------
    max_sr_grouped_dict = Property(Dict)

    @cached_property
    def _get_max_sr_grouped_dict(self):
        '''evaluate eta and prepare plot
        '''
        sr_columns = self.sr_columns
        lcc_arr = self.lcc_arr
        # ## N_s6cm_d results from 'V_op_d'*1.5
        # assume a distribution of stresses as for a simply
        # supported beam with a cantilever, corresponding
        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        N_s6cm_d = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # ## V_s6cm_d results from 'N_ip_d'/2
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        V_s6cm_d = lcc_arr[:, :, 0] * 0.56
        #        V_s6cm_d = ( ( lcc_arr[:, :, 0] / 2 ) ** 2 + ( lcc_arr[:, :, 1] * 1.5 ) ** 2 ) ** 0.5

        # resistance as characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        N_ck = 28.3
        V_ck = 63.8

        gamma_s = 1.5

        eta_N = N_s6cm_d / (N_ck / gamma_s)
        eta_V = abs(V_s6cm_d / (V_ck / gamma_s))
        eta_inter = (eta_N) + (eta_V)

        idx_max_hinge = eta_inter.argmax(axis=0)

        dict = {}
        for i, sr in enumerate(self.sr_columns):
            idx_1 = idx_max_hinge
            idx_2 = arange(0, idx_1.shape[0], 1)
            dict[sr] = lcc_arr[idx_1, idx_2, :]
        return dict
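    # Added note on the factors above (illustration only): the lever-arm
    # assumption (33 cm / 17 cm) gives
    # N_s6cm_d = V_op_d * (17 + 33 + 1) / 33 ~ 1.55 * V_op_d, and
    # V_s6cm_d = 0.56 * N_ip_d, i.e. slightly more than the 50 % share per screw.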

    def export_hf_max_grouped(self, filename):
        """exports the hinge forces as consistent pairs for the two case
        'max_eta' or 'max_N*'
        """
        from matplotlib import pyplot
        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x,
                          y,
                          filename='bla',
                          title='Title',
                          xlabel='xlabel',
                          ylabel='ylabel',
                          width=0.1,
                          xmin=0,
                          xmax=1000,
                          ymin=-1000,
                          ymax=1000,
                          figsize=[10, 5]):
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='green')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            if title == 'N_ip max':
                title = 'Fall max $\eta$'
#                title = 'Fall max $N^{*}$'

            if title == 'V_ip max':
                title = 'max $V_{ip}$'
            if title == 'V_op max':
                title = 'Fall max $V^{*}$'
            ax1.set_title(title)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)

        for sr in sr_columns:
            F_int = dict[sr]  # columns: N_ip, V_ip, V_op
            F_sym = F_int[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(X_sym,
                          F_sym[:, 0].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5],
                          ymin=-30,
                          ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_sym,
                              F_sym[:, 1].reshape(-1),
                              xlabel='$X$ [m]',
                              ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_sym_' + sr +
                              '_max',
                              title=sr + ' max',
                              xmin=0.0,
                              xmax=3.5 * length_xy_quarter,
                              figsize=[10, 5],
                              ymin=-30,
                              ymax=+30)

            save_bar_plot(X_sym,
                          F_sym[:, 2].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_sym_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          figsize=[10, 5],
                          ymin=-10,
                          ymax=+10)

            # r0_r1
            #
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 0].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$N^{*}_{Ed}$ [kN]',
                          filename=filename + 'N_ip' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5],
                          ymin=-30,
                          ymax=+30)
            if self.link_type == 'inc_V_ip':
                save_bar_plot(X_r0_r1,
                              F_r0_r1[:, 1].reshape(-1),
                              xlabel='$Y$ [m]',
                              ylabel='$V_{ip}$ [kN]',
                              filename=filename + 'V_ip' + '_r0_r1_' + sr +
                              '_max',
                              title=sr + ' max',
                              xmin=0.0,
                              xmax=2.0 * length_xy_quarter,
                              figsize=[5, 5],
                              ymin=-30,
                              ymax=+30)
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 2].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$V^{*}_{Ed}$ [kN]',
                          filename=filename + 'V_op' + '_r0_r1_' + sr + '_max',
                          title=sr + ' max',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          figsize=[5, 5],
                          ymin=-10,
                          ymax=+10)

    def plot_interaction_s6cm(self):
        """get the maximum values (consistent pairs of N and V) and plot them in an interaction plot
        """

        lcc_arr = self.lcc_arr

        # ## F_Edt results from 'V_op_d'*1.5
        # assume a distribution of stresses as for a simply
        # supported beam with a cantilever, corresponding

        # to the distance of the screws to each other and to the edge
        # of the TRC shell (33cm/17cm)
        #
        F_Edt = lcc_arr[:, :, 2] * (17. + 33. + 1.) / 33.

        # ## F_EdV1 results from 'N_ip_d'/2
        # assume an equal distribution (50% each) of the
        # normal forces to each screw
        #
        F_EdV1 = lcc_arr[:, :, 0] * 0.56
        #        V_s6cm_d = ( ( lcc_arr[:, :, 0] / 2 ) ** 2 + ( lcc_arr[:, :, 1] * 1.5 ) ** 2 ) ** 0.5

        # resistance as characteristic value obtained from the
        # experiment and EN DIN 1990
        #
        F_Rkt = 28.3
        F_RkV1 = 63.8

        gamma_M = 1.5

        eta_t = abs(F_Edt / (F_Rkt / gamma_M))
        eta_V1 = abs(F_EdV1 / (F_RkV1 / gamma_M))
        print 'eta_t.shape', eta_t.shape
        print 'eta_V1.shape', eta_V1.shape

        #        self.interaction_plot(abs(F_Edt), abs(F_EdV1))
        self.interaction_plot(eta_t, eta_V1)


#        eta_inter = ( eta_N ) + ( eta_V )
#
#        idx_max_hinge = eta_inter.argmax( axis = 0 )
#        idx_hinge = arange( 0, len( idx_max_hinge ), 1 )
#        plot_eta_N = eta_N[idx_max_hinge, idx_hinge]
#        plot_eta_V = eta_V[idx_max_hinge, idx_hinge]
#        self.interaction_plot( plot_eta_N, plot_eta_V )

    def interaction_plot(self, eta_N, eta_V):
        from matplotlib import font_manager
        ticks_font = font_manager.FontProperties(family='Times',
                                                 style='normal',
                                                 size=18,
                                                 weight='normal',
                                                 stretch='normal')
        from matplotlib import pyplot
        fig = pyplot.figure(facecolor="white", figsize=[10, 10])
        ax1 = fig.add_subplot(1, 1, 1)
        #            x = arange(0, 1.01, 0.01)
        #            y15 = (1 - x ** 1.5) ** (1 / 1.5)
        #            y = (1 - x)

        ax1.set_xlabel('$F_\mathrm{Ed,V1}/F_\mathrm{Rd,V1}$', fontsize=24)
        ax1.set_ylabel('$F_\mathrm{Ed,t}/F_\mathrm{Rd,t}$', fontsize=24)
        #            ax1.set_xlabel('$|N_\mathrm{Ed}|$' , fontsize=32)
        #            ax1.set_ylabel('$|V_\mathrm{Ed}|$', fontsize=32)

        #            ax1.plot(x , y, '--', color='black'
        #                      , linewidth=2.0)
        #            ax1.plot(x , y15, '--', color='black'
        #                      , linewidth=2.0)

        ax1.plot(eta_V, eta_N, 'wo', markersize=3)
        #            ax1.plot(eta_V, eta_N, 'o', color='green', markersize=8)

        #            ax1.plot( eta_V[where( limit < 1 )] , eta_N[where( limit < 1 )], 'o', markersize = 8 )
        #            ax1.plot( eta_V[where( limit > 1 )] , eta_N[where( limit > 1 )], 'o', color = 'red', markersize = 8 )

        for xlabel_i in ax1.get_xticklabels():
            xlabel_i.set_fontsize(24)
            xlabel_i.set_family('serif')

        for ylabel_i in ax1.get_yticklabels():
            ylabel_i.set_fontsize(24)
            ylabel_i.set_family('serif')

    #        ax1.plot( x , 1 - x, '--', color = 'black', label = 'lineare Interaktion' )

        ax1.set_xlim(0, 1.0)
        ax1.set_ylim(0, 1.0)
        ax1.legend()
        pyplot.show()
        pyplot.clf()

    # choose linking type (in-plane shear dof blocked or not)
    #
    link_type = Enum('exc_V_ip', 'inc_V_ip')

    # length of the shell (needed to plot the hinge force plots correctly)
    #
    length_xy_quarter = 3.5  # m

    def export_hf_lc(self):
        """exports the hinge forces for each loading case separately
        """

        from matplotlib import pyplot
        sr_columns = self.sr_columns
        dict = self.max_sr_grouped_dict
        length_xy_quarter = self.length_xy_quarter

        def save_bar_plot(x,
                          y,
                          filename='bla',
                          xlabel='xlabel',
                          ylabel='ylabel',
                          ymin=-10,
                          ymax=10,
                          width=0.1,
                          xmin=0,
                          xmax=1000,
                          figsize=[10, 5]):
            fig = pyplot.figure(facecolor="white", figsize=figsize)
            ax1 = fig.add_subplot(1, 1, 1)
            ax1.bar(x, y, width=width, align='center', color='blue')
            ax1.set_xlim(xmin, xmax)
            ax1.set_ylim(ymin, ymax)
            ax1.set_xlabel(xlabel, fontsize=22)
            ax1.set_ylabel(ylabel, fontsize=22)
            fig.savefig(filename, orientation='portrait', bbox_inches='tight')
            pyplot.clf()

        X = array(self.geo_data_dict['X_hf'])
        Y = array(self.geo_data_dict['Y_hf'])

        # symmetric axes
        #
        idx_sym = where(abs(Y[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_sym = X[idx_sym].reshape(-1)
        idx_r0_r1 = where(abs(X[:, 0] - 2.0 * length_xy_quarter) <= 0.0001)
        X_r0_r1 = Y[idx_r0_r1].reshape(-1)
        F_int = self.lc_arr

        for i, lc_name in enumerate(self.lc_name_list):
            filename = self.lc_list[i].plt_export

            max_N_ip = max(int(ndmax(F_int[i, :, 0], axis=0)) + 1, 1)
            max_V_ip = max(int(ndmax(F_int[i, :, 1], axis=0)) + 1, 1)
            max_V_op = max(int(ndmax(F_int[i, :, 2], axis=0)) + 1, 1)

            F_int_lc = F_int[i, :, :]  # columns: N_ip, V_ip, V_op
            F_sym = F_int_lc[idx_sym, :].reshape(-1, len(sr_columns))
            F_r0_r1 = F_int_lc[idx_r0_r1, :].reshape(-1, len(sr_columns))

            save_bar_plot(
                X_sym,
                F_sym[:, 0].reshape(-1),
                #                          xlabel = '$X$ [m]', ylabel = '$N^{ip}$ [kN]',
                xlabel='$X$ [m]',
                ylabel='$N^{*}$ [kN]',
                filename=filename + 'N_ip' + '_sym',
                xmin=0.0,
                xmax=3.5 * length_xy_quarter,
                ymin=-max_N_ip,
                ymax=max_N_ip,
                figsize=[10, 5])

            save_bar_plot(X_sym,
                          F_sym[:, 1].reshape(-1),
                          xlabel='$X$ [m]',
                          ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_sym',
                          xmin=0.0,
                          xmax=3.5 * length_xy_quarter,
                          ymin=-max_V_ip,
                          ymax=max_V_ip,
                          figsize=[10, 5])

            save_bar_plot(
                X_sym,
                F_sym[:, 2].reshape(-1),
                #                          xlabel = '$X$ [m]', ylabel = '$V_{op}$ [kN]',
                xlabel='$X$ [m]',
                ylabel='$V^{*}$ [kN]',
                filename=filename + 'V_op' + '_sym',
                xmin=0.0,
                xmax=3.5 * length_xy_quarter,
                ymin=-max_V_op,
                ymax=max_V_op,
                figsize=[10, 5])

            # r0_r1
            #
            save_bar_plot(
                X_r0_r1,
                F_r0_r1[:, 0].reshape(-1),
                #                          xlabel = '$Y$ [m]', ylabel = '$N_{ip}$ [kN]',
                xlabel='$Y$ [m]',
                ylabel='$N^{*}$ [kN]',
                filename=filename + 'N_ip' + '_r0_r1',
                xmin=0.0,
                xmax=2.0 * length_xy_quarter,
                ymin=-max_N_ip,
                ymax=max_N_ip,
                figsize=[5, 5])
            save_bar_plot(X_r0_r1,
                          F_r0_r1[:, 1].reshape(-1),
                          xlabel='$Y$ [m]',
                          ylabel='$V_{ip}$ [kN]',
                          filename=filename + 'V_ip' + '_r0_r1',
                          xmin=0.0,
                          xmax=2.0 * length_xy_quarter,
                          ymin=-max_V_ip,
                          ymax=max_V_ip,
                          figsize=[5, 5])
            save_bar_plot(
                X_r0_r1,
                F_r0_r1[:, 2].reshape(-1),
                #                          xlabel = '$Y$ [m]', ylabel = '$V_{op}$ [kN]',
                xlabel='$Y$ [m]',
                ylabel='$V^{*}$ [kN]',
                filename=filename + 'V_op' + '_r0_r1',
                xmin=0.0,
                xmax=2.0 * length_xy_quarter,
                ymin=-max_V_op,
                ymax=max_V_op,
                figsize=[5, 5])

    # ------------------------------------------------------------
    # View
    # ------------------------------------------------------------

    traits_view = View(VGroup(
        VSplit(
            Item('lcc_list', editor=lcc_list_editor, show_label=False),
            Item('lcc@', show_label=False),
        ), ),
                       resizable=True,
                       scrollable=True,
                       height=1.0,
                       width=1.0)
Example #12
class SimBSDB(SimBS):

    # vary the failure strain in PhiFnGeneralExtended:
    factor_eps_fail = Float(1.4, input=True, ps_levels=(1.0, 1.2, 3))

    #-----------------
    # composite cross section unit cell:
    #-----------------
    #
    ccs_unit_cell_key = Enum('FIL-10-09_2D-05-11_0.00462_all0',
                             CCSUnitCell.db.keys(),
                             simdb=True,
                             input=True,
                             auto_set=False,
                             enter_set=True)

    ccs_unit_cell_ref = Property(Instance(SimDBClass),
                                 depends_on='ccs_unit_cell_key')

    @cached_property
    def _get_ccs_unit_cell_ref(self):
        return CCSUnitCell.db[self.ccs_unit_cell_key]

    #-----------------
    # damage function:
    #-----------------
    #
    material_model = Str(input=True)

    def _material_model_default(self):
        # return the material model key of the first DamageFunctionEntry
        # This is necessary to avoid a ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].material_model

    calibration_test = Str(input=True)

    def _calibration_test_default(self):
        # return the calibration test key of the first DamageFunctionEntry
        # This is necessary to avoid a ValueError at setup
        return self.ccs_unit_cell_ref.damage_function_list[0].calibration_test

    damage_function = Property(Instance(MFnLineArray),
                               depends_on='input_change')

    @cached_property
    def _get_damage_function(self):
        return self.ccs_unit_cell_ref.get_param(self.material_model,
                                                self.calibration_test)

    #-----------------
    # phi function extended:
    #-----------------
    #
    phi_fn = Property(Instance(PhiFnGeneralExtendedExp),
                      depends_on='input_change,+ps_levels')

    @cached_property
    def _get_phi_fn(self):
        return PhiFnGeneralExtendedExp(mfn=self.damage_function, Efp_frac=0.2)


#        return PhiFnStrainHardening()
#        return PhiFnGeneralExtended( mfn = self.damage_function,
#                                     factor_eps_fail = self.factor_eps_fail )

#----------------------------------------------------------------------------------
# mats_eval
#----------------------------------------------------------------------------------

# age of the plate at the time of testing
# NOTE: the same phi-function is used independently of the age. This assumes
# an affine/proportional damage evolution for different ages.
#

    age = Int(
        28,  #input = True
    )

    # composite E-modulus
    #
    E_c = Property(Float, depends_on='input_change')

    @cached_property
    def _get_E_c(self):
        return self.ccs_unit_cell_ref.get_E_c_time(self.age)

    # Poisson's ratio
    #
    nu = Property(Float, depends_on='input_change')

    @cached_property
    def _get_nu(self):
        return self.ccs_unit_cell_ref.nu
Example #13
class DoublePulloutSym(RF):

    implements(IRF)

    title = Str('symmetrical yarn pullout')

    xi = Float(0.0179,
               auto_set=False,
               enter_set=True,
               input=True,
               distr=['weibull_min', 'uniform'])

    tau_fr = Float(2.5,
                   auto_set=False,
                   enter_set=True,
                   input=True,
                   distr=['uniform', 'norm'])
    # free length
    l = Float(0.0,
              auto_set=False,
              enter_set=True,
              input=True,
              distr=['uniform'])

    d = Float(26e-3,
              auto_set=False,
              input=True,
              enter_set=True,
              distr=['uniform', 'weibull_min'])

    E_mod = Float(72.0e3,
                  auto_set=False,
                  enter_set=True,
                  input=True,
                  distr=['uniform'])
    # slack
    theta = Float(0.01,
                  auto_set=False,
                  enter_set=True,
                  input=True,
                  distr=['uniform', 'norm'])

    phi = Float(1.,
                auto_set=False,
                enter_set=True,
                input=True,
                distr=['uniform', 'norm'])

    # embedded length
    L = Float(1.,
              auto_set=False,
              enter_set=True,
              input=True,
              distr=['uniform'])

    free_fiber_end = Bool(True, input=True)

    w = Float(enter_set=True, input=True, ctrl_range=(0, 1, 10))

    weave_code = '''
        '''

    def __call__(self, w, tau_fr, l, d, E_mod, theta, xi, phi, L):
        '''Return the force for a prescribed crack opening displacement w.
        '''
        A = pi * d**2 / 4.
        l = l * (1 + theta)
        w = w - theta * l
        Tau = tau_fr * phi * d * pi
        P_ = 0.5 * (-l * Tau +
                    sqrt(l**2 * Tau**2 + 4 * w * H(w) * E_mod * A * Tau))
        # one sided pullout P_ = ( -l * Tau + sqrt( l ** 2 * Tau ** 2 + 2 * w * H( w ) * E_mod * A * Tau ) )

        if self.free_fiber_end:
            # ------ FREE LENGTH -------

            # frictional force along the bond length
            P_fr = Tau * (L - l)

            # if pullout_criterion positive - embedded
            # otherwise pulled out
            #
            pull_out_criterion = P_fr - P_
            P_ = P_ * H(pull_out_criterion) + P_fr * H(-pull_out_criterion)
        else:
            # --------------------------
            # ------ clamped fiber end ---------
            v = L * (l * Tau + Tau * L) / E_mod / A
            P_ = P_ * H(Tau * L - P_) + (Tau * L + (w - v) /
                                         (l + 2 * L) * A * E_mod) * H(P_ -
                                                                      Tau * L)
            # ----------------------------------
        P = P_ * H(A * E_mod * xi - P_)
        return P
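    # Added note (not part of the original example): for w > 0 the expression
    # evaluated above is the closed-form frictional pullout force
    #
    #     P(w) = \frac{1}{2} \left( -l T + \sqrt{ l^2 T^2 + 4 w E A T } \right),
    #     with T = \tau_{fr} \, \varphi \, \pi \, d  and  A = \pi d^2 / 4
    #
    # (written in LaTeX). The Heaviside factors H(...) then select the embedded
    # or pulled-out branch (or the clamped-end branch if free_fiber_end is
    # False) and cut the response to zero once P_ exceeds the breaking force
    # A * E_mod * xi.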

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    changed = Event

    @on_trait_change('+input')
    def _set_changed(self):
        self.changed = True

    data_changed = Event

    @on_trait_change('+input')
    def refresh(self):
        figure = self.figure
        figure.clear()
        axes = figure.gca()

        P_fn = lambda w: self.__call__(w, self.tau_fr, self.l, self.d, self.
                                       E_mod, self.theta, self.xi, self.phi,
                                       self.L)
        pyF = frompyfunc(P_fn, 1, 1)

        w_arr = linspace(0.0, 1.0, 100)
        P_arr = array(pyF(w_arr), dtype='float_')

        axes.plot(w_arr, P_arr, lw=1.0, color='blue')

        self.data_changed = True

    group_attribs = VGroup(
        Item('tau_fr'),
        Item('l'),
        Item('d'),
        Item('E_mod'),
        Item('theta'),
        Item('xi'),
        Item('phi'),
        Item('L'),
        Item('free_fiber_end'),
    ),

    traits_view = View(
        group_attribs,
        scrollable=True,
        resizable=True,
        id='mk.figure.attribs',
        dock='tab',
    )

    traits_view_diag = View(HSplit(
        group_attribs,
        VGroup(Item('figure',
                    editor=MPLFigureEditor(),
                    show_label=False,
                    resizable=True),
               id='mk.figure.view'),
    ),
                            id='mk.view',
                            buttons=['OK', 'Cancel'],
                            resizable=True,
                            width=600,
                            height=400)
Example #14
class SPIRRID(HasTraits):
    '''Multidimensional statistical integration.
    
    Its name SPIRRID is an acronym for 
    Set of Parallel Independent Random Responses with Identical Distributions
    
    The package implements the evaluation of an integral over a set of 
    random variables affecting a response function RF and distributed 
    according to a probabilistic distribution PDistrib.
    
    The input parameters are divided into four categories in order
    to define the state consistency of the evaluation. The outputs
    are defined as cached properties that are reevaluated in response
    to changes in the inputs.

    The following events accumulate changes in the input parameters of spirrid:
    rf_change - change in the response function
    rand_change - change in the randomization
    conf_change - change in the configuration of the algorithm
    eps_change - change in the studied range of the process control variable       
    '''
    #--------------------------------------------------------------------
    # Response function
    #--------------------------------------------------------------------
    #
    rf = Instance(IRF)

    def _rf_changed(self):
        self.on_trait_change(self._set_rf_change, 'rf.changed')
        self.rv_dict = {}

    #--------------------------------------------------------------------
    # Specification of random parameters
    #--------------------------------------------------------------------
    #
    rv_dict = Dict

    def add_rv(self,
               variable,
               distribution='uniform',
               loc=0.,
               scale=1.,
               shape=1.,
               n_int=30):
        '''Declare a variable as random 
        '''
        if variable not in self.rf.param_keys:
            raise AssertionError('parameter %s not defined by the response function' \
                % variable)

        params_with_distr = self.rf.traits(
            distr=lambda x: isinstance(x, list) and distribution in x)
        if variable not in params_with_distr:
            raise AssertionError('distribution type %s not allowed for parameter %s' \
                % ( distribution, variable ))

        # @todo - let the RV take care of PDistrib specification.
        # isolate the dirty two-step definition of the distrib from spirrid
        #
        pd = PDistrib(distr_choice=distribution, n_segments=n_int)
        pd.distr_type.set(scale=scale, shape=shape, loc=loc)
        self.rv_dict[variable] = RV(name=variable, pd=pd, n_int=n_int)

    def del_rv(self, variable):
        '''Delete declaration of random variable
        '''
        del self.rv_dict[variable]

    def clear_rv(self):
        self.rv_dict = {}

    # subsidiary methods for sorted access to the random variables
    # (note: a dictionary does not define an order of its items)
    rv_keys = Property(List, depends_on='rv_dict')

    @cached_property
    def _get_rv_keys(self):
        rv_keys = sorted(self.rv_dict.keys())
        # the random variable gets an index based on the
        # sorted keys
        for idx, key in enumerate(rv_keys):
            self.rv_dict[key].idx = idx
        return rv_keys

    rv_list = Property(List, depends_on='rv_dict')

    @cached_property
    def _get_rv_list(self):
        return list(map(self.rv_dict.get, self.rv_keys))

    #--------------------------------------------------------------------
    # Define which changes in the response function and in the
    # statistical parameters are relevant for reevaluation of the response
    #--------------------------------------------------------------------
    rf_change = Event

    @on_trait_change('rf.changed')
    def _set_rf_change(self):
        self.rf_change = True

    rand_change = Event

    @on_trait_change('rv_dict, rv_dict.changed')
    def _set_rand_change(self):
        self.rand_change = True

    conf_change = Event

    @on_trait_change('+alg_option')
    def _set_conf_change(self):
        self.conf_change = True

    eps_change = Event

    @on_trait_change('+eps_range')
    def _set_eps_change(self):
        self.eps_change = True

    # Dictionary with key = rf parameters
    # and values = default param values for the resp func
    #
    param_dict = Property(Dict, depends_on='rf_change, rand_change')

    @cached_property
    def _get_param_dict(self):
        '''Gather all the traits with the metadata distr specified.
        '''
        param_dict = {}
        for name, value in zip(self.rf.param_keys, self.rf.param_values):
            rv = self.rv_dict.get(name, None)
            if rv is None:
                param_dict[name] = value
            else:
                param_dict[name] = self.theta_ogrid[rv.idx]
        return param_dict

    ##### - experimental #####
    # @deprecated: full coverage of the sampling domain - for orientation
    full_theta_arr_list = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_full_theta_arr_list(self):
        '''Get list of arrays with both deterministic and statistic arrays.
        '''
        param_arr_list = [
            array([value], dtype='float_') for value in self.rf.param_values
        ]
        for idx, name in enumerate(self.rf.param_keys):
            rv = self.rv_dict.get(name, None)
            if rv:
                param_arr_list[idx] = rv.theta_arr
        return param_arr_list

    def get_rvs_theta_arr(self, n_samples):
        rvs_theta_arr = array(
            [repeat(value, n_samples) for value in self.rf.param_values])
        for idx, name in enumerate(self.rf.param_keys):
            rv = self.rv_dict.get(name, None)
            if rv:
                rvs_theta_arr[idx, :] = rv.get_rvs_theta_arr(n_samples)
        return rvs_theta_arr

    # Constant parameters
    #
    const_param_dict = Property(Dict, depends_on='rf_change, rand_change')

    @cached_property
    def _get_const_param_dict(self):
        const_param_dict = {}
        for name, v in zip(self.rf.param_keys, self.rf.param_values):
            if name not in self.rv_keys:
                const_param_dict[name] = v
        return const_param_dict

    # List of discretized statistical domains
    #
    theta_arr_list = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_theta_arr_list(self):
        '''Get list of arrays with discretized RVs.
        '''
        return [rv.theta_arr for rv in self.rv_list]

    # Discretized statistical domain
    #
    theta_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_theta_ogrid(self):
        '''Get orthogonal list of arrays with discretized RVs.
        '''
        return orthogonalize(self.theta_arr_list)

    #---------------------------------------------------------------------------------
    # PDF arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    pdf_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_pdf_ogrid(self):
        '''Get orthogonal list of arrays with PDF values of RVs.
        '''
        pdf_arr_list = [rv.pdf_arr for rv in self.rv_list]
        return orthogonalize(pdf_arr_list)

    #---------------------------------------------------------------------------------
    # PDF * Theta arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    dG_ogrid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_dG_ogrid(self):
        '''Get orthogonal list of arrays with the PDF * Theta products.
        '''
        dG_arr_list = [rv.dG_arr for rv in self.rv_list]
        return orthogonalize(dG_arr_list)

    #---------------------------------------------------------------------------------
    # PDF grid - mutually multiplied arrays of PDF
    #---------------------------------------------------------------------------------
    dG_grid = Property(depends_on='rf_change, rand_change')

    @cached_property
    def _get_dG_grid(self):
        if len(self.dG_ogrid):
            return reduce(lambda x, y: x * y, self.dG_ogrid)
        else:
            return 1.0

    #------------------------------------------------------------------------------------
    # Configuration of the algorithm
    #------------------------------------------------------------------------------------
    #
    # cached_dG:
    # If set to True, the cross product of the pdf values of all random variables
    # is precalculated and stored in an n-dimensional grid;
    # otherwise the product is evaluated anew for every epsilon in the inner loop.
    #
    cached_dG = Bool(True, alg_option=True)

    # compiled_eps_loop:
    # If set to True, the loop over the control variable epsilon is compiled;
    # otherwise, a Python loop is used.
    compiled_eps_loop = Bool(True, alg_option=True)

    # compiled_QdG_loop:
    # If set to True, the integration loop over the product of the response
    # function and the pdf * theta weights is performed in C;
    # otherwise numpy arrays are used.
    compiled_QdG_loop = Bool(True, alg_option=True)

    def _compiled_QdG_loop_changed(self):
        '''If the inner loop is not compiled, the outer loop 
        must not be compiled as well.
        '''
        if not self.compiled_QdG_loop:
            self.compiled_eps_loop = False

    arg_list = Property(depends_on='rf_change, rand_change, conf_change')

    @cached_property
    def _get_arg_list(self):

        arg_list = []
        # create argument string for inline function
        if self.compiled_eps_loop:
            arg_list += ['mu_q_arr', 'e_arr']
        else:
            arg_list.append('e')

        arg_list += ['%s_flat' % name for name in self.rv_keys]

        if self.cached_dG:
            arg_list += ['dG_grid']
        else:
            arg_list += ['%s_pdf' % name for name in self.rv_keys]

        return arg_list

    C_code_qg = Property(depends_on='rf_change, rand_change, conf_change')

    @cached_property
    def _get_C_code_qg(self):
        if self.cached_dG:  # q_g - blitz matrix used to store the grid
            code_str = '\tdouble pdf = dG_grid(' + \
                       ','.join( [ 'i_%s' % name
                                  for name in self.rv_keys ] ) + \
                       ');\n'
        else:  # qg
            code_str = '\tdouble pdf = ' + \
                       '*'.join( [ ' *( %s_pdf + i_%s)' % ( name, name )
                                  for name in self.rv_keys ] ) + \
                       ';\n'
        return code_str

    #------------------------------------------------------------------------------------
    # Configurable generation of C-code for mean curve evaluation
    #------------------------------------------------------------------------------------
    C_code = Property(
        depends_on='rf_change, rand_change, conf_change, eps_change')

    @cached_property
    def _get_C_code(self):

        code_str = ''
        if self.compiled_eps_loop:

            # create code string for inline function
            #
            code_str += 'for( int i_eps = 0; i_eps < %g; i_eps++){\n' % self.n_eps

            if self.cached_dG:

                # multidimensional index needed for dG_grid;
                # blitz arrays must then be used for the other arrays as well
                #
                code_str += 'double eps = e_arr( i_eps );\n'

            else:
                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble eps = *( e_arr + i_eps );\n'

        else:

            # create code string for inline function
            #
            code_str += 'double eps = e;\n'

        code_str += 'double mu_q(0);\n'
        code_str += 'double q(0);\n'

        code_str += '#line 100\n'
        # create code for constant params
        for name, value in list(self.const_param_dict.items()):
            code_str += 'double %s = %g;\n' % (name, value)

        # generate loops over random params

        for rv in self.rv_list:

            name = rv.name
            n_int = rv.n_int

            # create the loop over the random variable
            #
            code_str += 'for( int i_%s = 0; i_%s < %g; i_%s++){\n' % (
                name, name, n_int, name)
            if self.cached_dG:

                # multidimensional index needed for pdf_grid - use blitz arrays
                #
                code_str += '\tdouble %s = %s_flat( i_%s );\n' % (name, name,
                                                                  name)
            else:

                # pointer access possible for single dimensional arrays
                # use the pointer arithmetics for accessing the pdfs
                code_str += '\tdouble %s = *( %s_flat + i_%s );\n' % (
                    name, name, name)

        if len(self.rv_keys) > 0:
            code_str += self.C_code_qg
            code_str += self.rf.C_code + \
                       '// Store the values in the grid\n' + \
                       '\tmu_q +=  q * pdf;\n'
        else:
            code_str += self.rf.C_code + \
                       '\tmu_q += q;\n'

        # close the random loops
        #
        for name in self.rv_keys:
            code_str += '};\n'

        if self.compiled_eps_loop:
            if self.cached_dG:  # blitz matrix
                code_str += 'mu_q_arr(i_eps) = mu_q;\n'
            else:
                code_str += '*(mu_q_arr + i_eps) = mu_q;\n'
            code_str += '};\n'
        else:
            code_str += 'return_val = mu_q;'
        return code_str

    eps_grid_shape = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_grid_shape(self):
        return tuple([len(eps) for eps in self.eps_list])

    eps_list = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_list(self):
        ctrl_list = self.rf.ctrl_traits
        # generate the slices to produce the grid of the control values
        eps_list = [linspace(*cv.ctrl_range) for cv in ctrl_list]
        # produce the tuple of expanded arrays with n-dimensions - values broadcasted
        return eps_list

    eps_grid = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_grid(self):
        '''Generate the grid of control variables.
        The array can be multidimensional depending on the dimension
        of the input variable of the current response function.
        '''
        ctrl_list = self.rf.ctrl_traits
        # generate the slices to produce the grid of the control values
        slices = [
            slice(cv.ctrl_range[0], cv.ctrl_range[1],
                  complex(0, cv.ctrl_range[2])) for cv in ctrl_list
        ]
        # produce the tuple of expanded arrays with n-dimensions - values broadcasted
        return mgrid[tuple(slices)]

    eps_arr = Property(depends_on='eps_change')

    @cached_property
    def _get_eps_arr(self):
        '''
        flatten the arrays and order them as columns of an array containing all combinations
        of the control variable values.
        '''
        return c_[tuple([eps_arr.flatten() for eps_arr in self.eps_grid])]

    compiler_verbose = Int(0)
    compiler = Str('gcc')

    def _eval(self):
        '''Evaluate the integral based on the configuration of algorithm.
        '''

        if not self.cached_dG and not self.compiled_QdG_loop:
            raise NotImplementedError(
                'Configuration for pure Python integration is too slow and is not implemented'
            )

        self._set_compiler()
        # prepare the array of the control variable discretization
        #
        eps_arr = self.eps_arr
        mu_q_arr = zeros((eps_arr.shape[0], ), dtype='float_')

        # prepare the parameters for the compiled function in
        # a separate dictionary
        c_params = {}

        if self.compiled_eps_loop:

            # for compiled eps_loop the whole input and output array must be passed to c
            #
            c_params['e_arr'] = eps_arr
            c_params['mu_q_arr'] = mu_q_arr
            #c_params['n_eps' ] = n_eps

        if self.compiled_QdG_loop:

            # prepare the lengths of the arrays to set the iteration bounds
            #
            for rv in self.rv_list:
                c_params['%s_flat' % rv.name] = rv.theta_arr

        if len(self.rv_list) > 0:
            if self.cached_dG:
                c_params['dG_grid'] = self.dG_grid
            else:
                for rv in self.rv_list:
                    c_params['%s_pdf' % rv.name] = rv.dG_arr
        else:
            c_params['dG_grid'] = self.dG_grid

        if self.cached_dG:
            conv = converters.blitz
        else:
            conv = converters.default

        t = time.clock()

        if self.compiled_eps_loop:

            # C loop over eps, all inner loops must be compiled as well
            #
            inline(self.C_code,
                   self.arg_list,
                   local_dict=c_params,
                   type_converters=conv,
                   compiler=self.compiler,
                   verbose=self.compiler_verbose)

        else:

            # Python loop over eps
            #
            for idx, e in enumerate(eps_arr):

                if self.compiled_QdG_loop:

                    # C loop over random dimensions
                    #
                    c_params['e'] = e  # prepare the parameter
                    mu_q = inline(self.C_code,
                                  self.arg_list,
                                  local_dict=c_params,
                                  type_converters=conv,
                                  compiler=self.compiler,
                                  verbose=self.compiler_verbose)
                else:

                    # Numpy loops over random dimensions
                    #
                    # get the rf grid for all combinations of
                    # parameter values
                    #
                    Q_grid = self.rf(*e, **self.param_dict)

                    # multiply the response grid with the contributions
                    # of pdf distributions (weighted by the delta of the
                    # random variable disretization)
                    #
                    Q_grid *= self.dG_grid

                    # sum all the values to get the integral
                    mu_q = sum(Q_grid)

                # add the value to the return array
                mu_q_arr[idx] = mu_q

        duration = time.clock() - t

        return mu_q_arr, duration

    def eval_i_dG_grid(self):
        '''Get the integral of the pdf * theta grid.
        '''
        return sum(self.dG_grid)

    def _eval_mu_q(self):
        # configure eval and call it
        pass

    def _eval_stdev_q(self):
        # configure eval and call it
        pass

    #--------------------------------------------------------------------------------------------
    # Numpy implementation
    #--------------------------------------------------------------------------------------------
    def get_rf(self, eps):
        '''
        Numpy based evaluation of the response function.
        '''
        return self.rf(eps, **self.param_dict)

    #---------------------------------------------------------------------------------------------
    # Output properties
    #---------------------------------------------------------------------------------------------

    # container for the data obtained in the integration
    #
    # This is not only the mean curve but also stdev and
    # execution statistics. Such an implementation
    # concentrates the critical part of the algorithmic
    # evaluation and avoids duplication of code and
    # repeated calls. The results are cached in the tuple.
    # They are accessed by the convenience properties defined
    # below.
    #
    results = Property(
        depends_on='rf_change, rand_change, conf_change, eps_change')

    @cached_property
    def _get_results(self):
        return self._eval()

    #---------------------------------------------------------------------------------------------
    # Output accessors
    #---------------------------------------------------------------------------------------------
    # the properties that access the cached results and give them a name

    mu_q_arr = Property()

    def _get_mu_q_arr(self):
        return self.results[0]

    mu_q_grid = Property()

    def _get_mu_q_grid(self):
        return self.mu_q_arr.reshape(self.eps_grid_shape)

    exec_time = Property()

    def _get_exec_time(self):
        '''Execution time of the last evaluation.
        '''
        return self.results[1]

    mean_curve = Property()

    def _get_mean_curve(self):
        '''Mean response curve.
        '''
        return MFnLineArray(xdata=self.eps_arr, ydata=self.mu_q_arr)

    mu_q_peak_idx = Property()

    def _get_mu_q_peak_idx(self):
        '''Get the index of the mean peak response value'''
        return argmax(self.mu_q_arr)

    mu_q_peak = Property()

    def _get_mu_q_peak(self):
        '''Get mean peak response value'''
        return self.mu_q_arr[self.mu_q_peak_idx]

    eps_at_peak = Property()

    def _get_eps_at_peak(self):
        '''Get the strain at the maximum mean response mu_q
        '''
        return self.eps_arr[self.mu_q_peak_idx]

    stdev_mu_q_peak = Property()

    def _get_stdev_mu_q_peak(self):
        '''
        Numpy-based evaluation of the standard deviation of the peak response.
        '''
        mu_q_peak = self.mu_q_peak
        eps_at_peak = self.eps_at_peak

        q_quad_grid = self.get_rf(eps_at_peak)**2
        q_quad_grid *= self.dG_grid
        q_quad_peak = sum(q_quad_grid)
        stdev_mu_q_peak = sqrt(q_quad_peak - mu_q_peak**2)

        return stdev_mu_q_peak

    #---------------------------------------------------------------------------------------------
    # Auxiliary methods
    #---------------------------------------------------------------------------------------------
    def _set_compiler(self):
        '''Catch a possible mismatch between scipy.weave and the compiler.
        '''
        try:
            uname = os.uname()[3]
        except:
            # it is not Linux - just let it go and suffer
            return

        #if self.compiler == 'gcc':
        #os.environ['CC'] = 'gcc-4.1'
        #os.environ['CXX'] = 'g++-4.1'
        #os.environ['OPT'] = '-DNDEBUG -g -fwrapv -O3'

    traits_view = View(
        Item('rf@', show_label=False),
        width=0.3,
        height=0.3,
        resizable=True,
        scrollable=True,
    )
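# A plain-numpy sketch of the integration scheme configured above: the mean
# response mu_q(e) is approximated by summing the response function over an
# orthogonal grid of discretized random variables weighted by the precomputed
# dG grid (the cached_dG idea). The response function q and the distributions
# below are illustrative assumptions, not part of the SPIRRID class.
import numpy as np
from scipy.stats import norm, uniform

def q(e, la, xi):
    # illustrative filament response: linear elastic up to the breaking strain xi
    return la * e * (e <= xi)

n_int = 30
la_distr = norm(10.0, 1.0)
xi_distr = uniform(0.5, 1.0)

# discretized random domains and their integration weights dG = pdf * d_theta
la_arr = np.linspace(la_distr.ppf(0.001), la_distr.ppf(0.999), n_int)
xi_arr = np.linspace(xi_distr.ppf(0.001), xi_distr.ppf(0.999), n_int)
dG_la = la_distr.pdf(la_arr) * (la_arr[1] - la_arr[0])
dG_xi = xi_distr.pdf(xi_arr) * (xi_arr[1] - xi_arr[0])

# orthogonalize: orient each array along its own axis so that they broadcast
la_ogrid, xi_ogrid = la_arr[:, None], xi_arr[None, :]
dG_grid = dG_la[:, None] * dG_xi[None, :]

eps_arr = np.linspace(0.0, 2.0, 80)
mu_q_arr = np.array([np.sum(q(e, la_ogrid, xi_ogrid) * dG_grid)
                     for e in eps_arr])   # Python loop over the control variable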
Ejemplo n.º 15
class ECBLMNDiagram(HasTraits):

    # calibrator supplying the effective material law
    calib = Instance(ECBLCalib)

    def _calib_default(self):
        return ECBLCalib(notify_change=self.set_modified)

    def _calib_changed(self):
        self.calib.notify_change = self.set_modified

    modified = Event

    def set_modified(self):
        print('MN: set_modified')
        self.modified = True

    # cross section
    cs = DelegatesTo('calib')

    calibrated_ecb_law = Property(depends_on='modified')

    @cached_property
    def _get_calibrated_ecb_law(self):
        print('NEW CALIBRATION')
        return self.calib.calibrated_ecb_law

    eps_cu = Property()

    def _get_eps_cu(self):
        return -self.cs.cc_law.eps_c_u

    eps_tu = Property()

    def _get_eps_tu(self):
        return self.calibrated_ecb_law.eps_tex_u

    n_eps = Int(5, auto_set=False, enter_set=True)
    eps_range = Property(depends_on='n_eps')

    @cached_property
    def _get_eps_range(self):
        eps_c_space = np.linspace(self.eps_cu, 0, self.n_eps)
        eps_t_space = np.linspace(0, self.eps_tu, self.n_eps)

        eps_ccu = 0.8 * self.eps_cu

        #eps_cc = self.eps_cu * np.ones_like(eps_c_space)
        eps_cc = np.linspace(eps_ccu, self.eps_cu, self.n_eps)
        eps_ct = self.eps_cu * np.ones_like(eps_t_space)
        eps_tc = self.eps_tu * np.ones_like(eps_c_space)
        eps_tt = self.eps_tu * np.ones_like(eps_t_space)

        eps1 = np.vstack([eps_c_space, eps_cc])
        eps2 = np.vstack([eps_t_space, eps_ct])
        eps3 = np.vstack([eps_tc, eps_c_space])
        eps4 = np.vstack([eps_tt, eps_t_space])

        return np.hstack([eps1, eps2, eps3, eps4])

    n_eps_range = Property(depends_on='n_eps')

    @cached_property
    def _get_n_eps_range(self):
        return self.eps_range.shape[1]

    #===========================================================================
    # MN Diagram
    #===========================================================================

    def _get_MN_fn(self, eps_lo, eps_up):
        self.cs.set(eps_lo=eps_lo, eps_up=eps_up)
        return (self.cs.M, self.cs.N)

    MN_vct = Property(depends_on='modified')

    def _get_MN_vct(self):
        return np.vectorize(self._get_MN_fn)

    MN_arr = Property(depends_on='modified')

    @cached_property
    def _get_MN_arr(self):
        return self.MN_vct(self.eps_range[0, :], self.eps_range[1, :])

    #===========================================================================
    # f_eps Diagram
    #===========================================================================

    current_eps_idx = Int(0)  # , auto_set = False, enter_set = True)

    def _current_eps_idx_changed(self):
        self._clear_fired()
        self._replot_fired()

    current_eps = Property(depends_on='current_eps_idx')

    @cached_property
    def _get_current_eps(self):
        return self.eps_range[(0, 1), self.current_eps_idx]

    current_MN = Property(depends_on='current_eps_idx')

    @cached_property
    def _get_current_MN(self):
        return self._get_MN_fn(*self.current_eps)

    #===========================================================================
    # Plotting
    #===========================================================================

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.08, 0.13, 0.85, 0.74])
        return figure

    data_changed = Event

    clear = Button

    def _clear_fired(self):
        self.figure.clear()
        self.data_changed = True

    replot = Button

    def _replot_fired(self):

        ax = self.figure.add_subplot(2, 2, 1)

        ax.plot(-self.eps_range, [0, 0.06], color='black')

        ax.plot(-self.current_eps, [0, 0.06], lw=3, color='red')

        ax.spines['left'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')

        ax = self.figure.add_subplot(2, 2, 2)

        ax.plot(self.MN_arr[0], -self.MN_arr[1], lw=2, color='blue')

        ax.plot(self.current_MN[0],
                -self.current_MN[1],
                'g.',
                markersize=20.0,
                color='red')

        ax.spines['left'].set_position('zero')
        ax.spines['bottom'].set_position('zero')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.spines['left'].set_smart_bounds(True)
        ax.spines['bottom'].set_smart_bounds(True)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
        ax.grid(b=None, which='major')

        self.cs.set(eps_lo=self.current_eps[0], eps_up=self.current_eps[1])

        ax = self.figure.add_subplot(2, 2, 3)

        self.cs.plot_eps(ax)

        ax = self.figure.add_subplot(2, 2, 4)

        self.cs.plot_sig(ax)

        self.data_changed = True

    view = View(HSplit(
        Group(
            HGroup(
                Group(Item('n_eps', springy=True),
                      label='Discretization',
                      springy=True),
                springy=True,
            ),
            HGroup(
                Group(VGroup(
                    Item(
                        'cs',
                        label='Cross section',
                        show_label=False,
                        springy=True,
                        editor=InstanceEditor(kind='live'),
                    ),
                    Item(
                        'calib',
                        label='Calibration',
                        show_label=False,
                        springy=True,
                        editor=InstanceEditor(kind='live'),
                    ),
                    springy=True,
                ),
                      label='Cross section',
                      springy=True),
                springy=True,
            ),
            scrollable=True,
        ),
        Group(
            HGroup(
                Item('replot', show_label=False),
                Item('clear', show_label=False),
            ),
            Item(
                'current_eps_idx',
                editor=RangeEditor(
                    low=0,
                    high_name='n_eps_range',
                    format='(%s)',
                    mode='slider',
                    auto_set=False,
                    enter_set=False,
                ),
                show_label=False,
            ),
            Item('figure',
                 editor=MPLFigureEditor(),
                 resizable=True,
                 show_label=False),
            id='simexdb.plot_sheet',
            label='plot sheet',
            dock='tab',
        ),
    ),
                width=1.0,
                height=0.8,
                resizable=True,
                buttons=['OK', 'Cancel'])
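# A hedged sketch of the strain-pair sweep behind the MN diagram above: the
# four branches of (eps_lo, eps_up) pairs are stacked as in _get_eps_range and
# a cross-section evaluation is vectorized over them; MN_fn is only a
# placeholder for the actual self.cs.M, self.cs.N evaluation.
import numpy as np

n_eps = 5
eps_cu, eps_tu = -0.0033, 0.01                      # illustrative ultimate strains
eps_c_space = np.linspace(eps_cu, 0, n_eps)
eps_t_space = np.linspace(0, eps_tu, n_eps)

eps1 = np.vstack([eps_c_space, np.linspace(0.8 * eps_cu, eps_cu, n_eps)])
eps2 = np.vstack([eps_t_space, eps_cu * np.ones(n_eps)])
eps3 = np.vstack([eps_tu * np.ones(n_eps), eps_c_space])
eps4 = np.vstack([eps_tu * np.ones(n_eps), eps_t_space])
eps_range = np.hstack([eps1, eps2, eps3, eps4])     # shape (2, 4 * n_eps)

def MN_fn(eps_lo, eps_up):
    # placeholder returning a dummy moment / normal-force pair
    return eps_lo - eps_up, eps_lo + eps_up

M_arr, N_arr = np.vectorize(MN_fn)(eps_range[0, :], eps_range[1, :])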
Ejemplo n.º 16
class YMBAutoCorrelView(HasTraits):

    correl_data = Instance(YMBAutoCorrel)

    axes_adjust = List([0.1, 0.1, 0.8, 0.8])

    data = Property
    def _get_data(self):
        return self.correl_data.data

    zero = Constant(0)
    slider_max = Property()
    def _get_slider_max(self):
        return self.data.n_cuts - 1

    cut_slider = Range('zero', 'slider_max', mode='slider', auto_set=False, enter_set=True, modified=True)
    vcut_slider = Range('zero', 'slider_max', mode='slider', auto_set=False, enter_set=True, modified=True)

    cut_slider_on = Bool(False, modified=True)

    color = Str('blue')

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure()
        figure.add_axes(self.axes_adjust)
        return figure

    data_changed = Event(True)
    @on_trait_change('correl_data.input_change, +modified')
    def _redraw(self):
        # TODO: set correct ranges, fix axis range (axes.xlim)
        print('redrawing')
        figure = self.figure
        figure.clear()
        var_data = self.correl_data.corr_arr
        if self.cut_slider_on:
            i = self.cut_slider
            j = self.vcut_slider
            plot_data = getattr(self.data, self.correl_data.var_enum_)
            # plot_data = vstack( [plot_data[:, i], plot_data[:, j]] ).T
            # plot only values > -1
            # plot_data = plot_data[prod( plot_data >= 0, axis = 1, dtype = bool )]
            plot_data_x = plot_data[:, i]
            plot_data_y = plot_data[:, j]
            plot_data_corr = corrcoef(plot_data_x, plot_data_y)[0, 1]
            plot_data_corr_spear = spearmanr(plot_data_x, plot_data_y)[0]

            left, width = 0.1, 0.65
            bottom, height = 0.1, 0.65
            bottom_h = left_h = left + width + 0.02

            rect_scatter = [left, bottom, width, height]
            rect_histx = [left, bottom_h, width, 0.2]
            rect_histy = [left_h, bottom, 0.2, height]

            axScatter = figure.add_axes(rect_scatter)
            axHistx = figure.add_axes(rect_histx)
            axHisty = figure.add_axes(rect_histy)
            axScatter.clear()
            axHistx.clear()
            axHisty.clear()

            from matplotlib.ticker import NullFormatter
            axHistx.xaxis.set_major_formatter(NullFormatter())
            axHisty.yaxis.set_major_formatter(NullFormatter())

            axScatter.scatter(plot_data_x,
                               plot_data_y)

            # binwidth = 0.25
            # xymax = max( [max( abs( self.data.cf[:, j] ) ), max( abs( self.data.cf[:, i] ) )] )
            # lim = ( int( xymax / binwidth ) + 1 ) * binwidth

            # axScatter.set_xlim( ( -lim, lim ) )
            # axScatter.set_ylim( ( -lim, lim ) )

            # bins = arange( -lim, lim + binwidth, binwidth )
            axHistx.hist(plot_data_x.compressed(), bins=40)
            axHisty.hist(plot_data_y.compressed(), bins=40, orientation='horizontal')
            axHistx.set_xlim(axScatter.get_xlim())
            axHisty.set_ylim(axScatter.get_ylim())

            axScatter.set_xlabel(r'$\mathrm{cut\, %i}$' % self.cut_slider, fontsize=16)
            axScatter.set_ylabel(r'$\mathrm{cut\, %i}$' % self.vcut_slider, fontsize=16)
            axScatter.text(axScatter.get_xlim()[0], axScatter.get_ylim()[0],
                             'actual set correlation %.3f (Pearson), %.3f (Spearman)' % (plot_data_corr, plot_data_corr_spear), color='r')

        if not self.cut_slider_on:
            figure.add_axes(self.axes_adjust)
            axes = figure.axes[0]
            axes.clear()
            x_coor = self.data.x_coord
            axes.grid()
            for i in range(0, var_data.shape[1]):
                axes.plot(x_coor[i:] - x_coor[i],
                           var_data[i, (i):], '-x', color=self.color)
            # approximate by the polynomial (of the i-th order)
            # axes.plot( x_coor, self.correl_data.peval( x_coor, self.correl_data.fit_correl ), 'b', linewidth = 3 )
            setp(axes.get_xticklabels(), position=(0, -.025))
            axes.set_xlabel(r'$x \, [\mathrm{mm}]$', fontsize=15)
            axes.set_ylabel(r'$\mathrm{correlation}$', fontsize=15)
            axes.set_ylim(-1, 1)

        self.data_changed = True


    traits_view = View(Group(Item('correl_data', show_label=False, style='custom'),
                              HGroup(
                       Item('cut_slider_on', label='Scatter'),
                       Item('cut_slider', show_label=False, springy=True, enabled_when='cut_slider_on == True'),
                       Item('vcut_slider', show_label=False, springy=True, enabled_when='cut_slider_on == True'),
                       ),
                       Group(Item('figure', style='custom',
                              editor=MPLFigureEditor(),
                              show_label=False)
                              , id='figure.view'),
                       ),
                       resizable=True,
                        )
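# A standalone matplotlib sketch of the scatter-plus-marginal-histograms layout
# assembled in _redraw above, with random data standing in for the yarn cut data.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter

x, y = np.random.multivariate_normal([0, 0], [[1.0, 0.7], [0.7, 1.0]], 500).T
r_pearson = np.corrcoef(x, y)[0, 1]

left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
offset = left + width + 0.02

fig = plt.figure()
ax_scatter = fig.add_axes([left, bottom, width, height])
ax_histx = fig.add_axes([left, offset, width, 0.2])
ax_histy = fig.add_axes([offset, bottom, 0.2, height])
ax_histx.xaxis.set_major_formatter(NullFormatter())   # hide duplicated tick labels
ax_histy.yaxis.set_major_formatter(NullFormatter())

ax_scatter.scatter(x, y)
ax_histx.hist(x, bins=40)
ax_histy.hist(y, bins=40, orientation='horizontal')
ax_histx.set_xlim(ax_scatter.get_xlim())
ax_histy.set_ylim(ax_scatter.get_ylim())
ax_scatter.text(ax_scatter.get_xlim()[0], ax_scatter.get_ylim()[0],
                'correlation %.3f (Pearson)' % r_pearson, color='r')
plt.show()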
Ejemplo n.º 17
class CompositeCrackBridge(HasTraits):

    reinforcement_lst = List(Instance(Reinforcement))
    w = Float
    E_m = Float
    Ll = Float
    Lr = Float
    damage_initial_value = Array

    V_f_tot = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_V_f_tot(self):
        V_f_tot = 0.0
        for reinf in self.reinforcement_lst:
            V_f_tot += reinf.V_f
        return V_f_tot

    E_c = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_E_c(self):
        E_fibers = 0.0
        for reinf in self.reinforcement_lst:
            E_fibers += reinf.V_f * reinf.E_f
        E_c = self.E_m * (1. - self.V_f_tot) + E_fibers
        return E_c * (1. + 1e-15)

    sorted_theta = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_theta(self):
        '''sorts the integral points by bond in descending order'''
        depsf_arr = np.array([])
        V_f_arr = np.array([])
        E_f_arr = np.array([])
        xi_arr = np.array([])
        stat_weights_arr = np.array([])
        nu_r_arr = np.array([])
        r_arr = np.array([])
        for reinf in self.reinforcement_lst:
            n_int = len(np.hstack((np.array([]), reinf.depsf_arr)))
            depsf_arr = np.hstack((depsf_arr, reinf.depsf_arr))
            V_f_arr = np.hstack((V_f_arr, np.repeat(reinf.V_f, n_int)))
            E_f_arr = np.hstack((E_f_arr, np.repeat(reinf.E_f, n_int)))
            xi_arr = np.hstack((xi_arr, np.repeat(reinf.xi, n_int)))
            #            stat_weights_arr = np.hstack((stat_weights_arr,
            #                                          np.repeat(reinf.stat_weights, n_int)))
            stat_weights_arr = np.hstack(
                (stat_weights_arr, reinf.stat_weights))
            nu_r_arr = np.hstack((nu_r_arr, reinf.nu_r))
            r_arr = np.hstack((r_arr, reinf.r_arr))
        argsort = np.argsort(depsf_arr)[::-1]
        # sorting the masks for the evaluation of F
        idxs = np.array([])
        for i, reinf in enumerate(self.reinforcement_lst):
            idxs = np.hstack((idxs, i * np.ones_like(reinf.depsf_arr)))
        masks = []
        for i, reinf in enumerate(self.reinforcement_lst):
            masks.append((idxs == i)[argsort])
        max_depsf = [
            np.max(reinf.depsf_arr) for reinf in self.reinforcement_lst
        ]
        masks = [masks[i] for i in np.argsort(max_depsf)[::-1]]
        return depsf_arr[argsort], V_f_arr[argsort], E_f_arr[argsort], \
                xi_arr[argsort], stat_weights_arr[argsort], \
                nu_r_arr[argsort], masks, r_arr[argsort]

    sorted_depsf = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_depsf(self):
        return self.sorted_theta[0]

    sorted_V_f = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_V_f(self):
        return self.sorted_theta[1]

    sorted_E_f = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_E_f(self):
        return self.sorted_theta[2]

    sorted_xi = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_xi(self):
        return self.sorted_theta[3]

    sorted_stats_weights = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_stats_weights(self):
        return self.sorted_theta[4]

    sorted_nu_r = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_nu_r(self):
        return self.sorted_theta[5]

    sorted_masks = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_masks(self):
        return self.sorted_theta[6]

    sorted_r = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_sorted_r(self):
        return self.sorted_theta[7]

    sorted_xi_cdf = Property(depends_on='reinforcement_lst+,Ll,Lr')

    @cached_property
    def _get_sorted_xi_cdf(self):
        '''breaking strain: CDF for random and Heaviside for discrete values'''
        # TODO: does not work for reinforcement types with the same xi
        methods = []
        masks = []
        for reinf in self.reinforcement_lst:
            masks.append(self.sorted_xi == reinf.xi)
            if isinstance(reinf.xi, float):
                # bind xi at definition time to avoid the late-binding closure pitfall
                methods.append(lambda x, xi=reinf.xi: 1.0 * (xi <= x))
            elif isinstance(reinf.xi, RV):
                methods.append(reinf.xi._distr.cdf)
            elif isinstance(reinf.xi, WeibullFibers):
                reinf.xi.Ll = self.Ll
                reinf.xi.Lr = self.Lr
                methods.append(reinf.xi.cdf)
        return methods, masks

    Kf = Property(depends_on='reinforcement_lst+')

    @cached_property
    def _get_Kf(self):
        return self.sorted_V_f * self.sorted_nu_r * \
                self.sorted_stats_weights * self.sorted_E_f

    def vect_xi_cdf(self, epsy, x_short, x_long):
        Pf = np.zeros_like(self.sorted_depsf)
        methods, masks = self.sorted_xi_cdf
        for i, method in enumerate(methods):
            if method.__doc__ in ('weibull_fibers_cdf_mc',
                                  'weibull_fibers_cdf_cb_elast'):
                Pf[masks[i]] += method(epsy[masks[i]],
                                       self.sorted_depsf[masks[i]],
                                       self.sorted_r[masks[i]],
                                       x_short[masks[i]], x_long[masks[i]])
            else:
                Pf[masks[i]] += method(epsy[masks[i]])
        return Pf

    def dem_depsf_vect(self, damage):
        '''evaluates the deps_m given deps_f
        at that point and the damage array'''
        Kf_intact = self.Kf * (1. - damage)
        Kf_intact_bonded = np.hstack((0.0, np.cumsum((Kf_intact))))[:-1]
        Kf_broken = np.sum(self.Kf - Kf_intact)
        Kf_add = Kf_intact_bonded + Kf_broken
        Km = (1. - self.V_f_tot) * self.E_m
        E_mtrx = Km + Kf_add
        mu_T = np.cumsum((self.sorted_depsf * Kf_intact)[::-1])[::-1]
        return mu_T / E_mtrx

    def F(self, dems, amin):
        '''Auxiliary function (see Part II, appendix B)
        '''
        F = np.zeros_like(self.sorted_depsf)
        for i, mask in enumerate(self.sorted_masks):
            depsfi = self.sorted_depsf[mask]
            demsi = dems[mask]
            fi = 1. / (depsfi + demsi)
            F[mask] = np.hstack((np.array([0.0]), cumtrapz(fi, -depsfi)))
            if i == 0:
                C = 0.0
            else:
                depsf0 = self.sorted_depsf[self.sorted_masks[i - 1]]
                depsf1 = depsfi[0]
                idx = np.sum(depsf0 > depsf1) - 1
                depsf2 = depsf0[idx]
                a1 = np.exp(F[self.sorted_masks[i - 1]][idx] / 2. +
                            np.log(amin))
                p = depsf2 - depsf1
                q = depsf1 + demsi[0]
                amin_i = np.sqrt(a1**2 + p / q * a1**2)
                C = np.log(amin_i / amin)
            F[mask] += 2 * C
        return F

    def clamped(self, Lmin, Lmax, init_dem):
        a = np.hstack((-Lmin, 0.0, Lmax))
        em = np.hstack((init_dem * Lmin, 0.0, init_dem * Lmax))
        epsf0 = (self.sorted_depsf / 2. * (Lmin**2 + Lmax**2) + self.w +
                 em[0] * Lmin / 2. + em[-1] * Lmax / 2.) / (Lmin + Lmax)
        return a, em, epsf0

    def profile(self, iter_damage, Lmin, Lmax):
        '''Evaluate the debonded lengths and the strain profiles
        for the given damage iteration and boundary distances Lmin, Lmax.
        '''
        # matrix strain derivative with resp. to z as a function of T
        dems = self.dem_depsf_vect(iter_damage)
        # initial matrix strain derivative
        init_dem = dems[0]
        # debonded length of fibers with Tmax
        amin = (self.w /
                (np.abs(init_dem) + np.abs(self.sorted_depsf[0])))**0.5
        # integrated f(depsf) - see article
        F = self.F(dems, amin)
        # a1 is a(depsf) for double sided pullout
        a1 = amin * np.exp(F / 2.)
        #aX = np.exp((-np.log(np.abs(self.sorted_depsf) + dems) + np.log(self.w)) / 2.)
        if Lmin < a1[0] and Lmax < a1[0]:
            # all fibers debonded up to Lmin and Lmax
            a, em, epsf0 = self.clamped(Lmin, Lmax, init_dem)

        elif Lmin < a1[0] and Lmax >= a1[0]:
            # all fibers debonded up to Lmin but not up to Lmax
            amin = -Lmin + np.sqrt(2 * Lmin**2 + 2 * self.w /
                                   (self.sorted_depsf[0] + init_dem))
            C = np.log(amin**2 + 2 * Lmin * amin - Lmin**2)
            a2 = np.sqrt(2 * Lmin**2 + np.exp((F + C))) - Lmin
            if Lmax < a2[0]:
                a, em, epsf0 = self.clamped(Lmin, Lmax, init_dem)
            else:
                if Lmax <= a2[-1]:
                    idx = np.sum(a2 < Lmax) - 1
                    a = np.hstack((-Lmin, 0.0, a2[:idx + 1], Lmax))
                    em2 = np.cumsum(np.diff(np.hstack((0.0, a2))) * dems)
                    em = np.hstack((init_dem * Lmin, 0.0, em2[:idx + 1],
                                    em2[idx] + (Lmax - a2[idx]) * dems[idx]))
                    um = np.trapz(em, a)
                    epsf01 = em2[:idx +
                                 1] + a2[:idx + 1] * self.sorted_depsf[:idx +
                                                                       1]
                    epsf02 = (self.w + um + self.sorted_depsf[idx + 1:] / 2. *
                              (Lmin**2 + Lmax**2)) / (Lmin + Lmax)
                    epsf0 = np.hstack((epsf01, epsf02))
                else:
                    a = np.hstack((-Lmin, 0.0, a2, Lmax))
                    em2 = np.cumsum(np.diff(np.hstack((0.0, a2))) * dems)
                    em = np.hstack((init_dem * Lmin, 0.0, em2, em2[-1]))
                    epsf0 = em2 + self.sorted_depsf * a2
        elif a1[0] < Lmin and a1[-1] > Lmin:
            # some fibers are debonded up to Lmin, some are not
            # boundary condition position
            idx1 = np.sum(a1 <= Lmin)
            # a(T) for one sided pullout
            # first debonded length amin for one sided PO
            depsfLmin = self.sorted_depsf[idx1]
            p = (depsfLmin + dems[idx1])
            a_short = np.hstack((a1[:idx1], Lmin))
            em_short = np.cumsum(
                np.diff(np.hstack((0.0, a_short))) * dems[:idx1 + 1])
            emLmin = em_short[-1]
            umLmin = np.trapz(np.hstack((0.0, em_short)),
                              np.hstack((0.0, a_short)))
            amin = -Lmin + np.sqrt(4 * Lmin**2 * p**2 - 4 * p * emLmin * Lmin +
                                   4 * p * umLmin - 2 * p * Lmin**2 *
                                   depsfLmin + 2 * p * self.w) / p
            C = np.log(amin**2 + 2 * amin * Lmin - Lmin**2)
            a2 = (np.sqrt(2 * Lmin**2 + np.exp(F + C - F[idx1])) - Lmin)[idx1:]
            # matrix strain profiles - shorter side
            a_short = np.hstack((-Lmin, -a1[:idx1][::-1], 0.0))
            dems_short = np.hstack((dems[:idx1], dems[idx1]))
            em_short = np.hstack(
                (0.0, np.cumsum(np.diff(-a_short[::-1]) * dems_short)))[::-1]
            if a2[-1] > Lmax:
                idx2 = np.sum(a2 <= Lmax)
                # matrix strain profiles - longer side
                a_long = np.hstack((a1[:idx1], a2[:idx2]))
                em_long = np.cumsum(
                    np.diff(np.hstack((0.0, a_long))) * dems[:idx1 + idx2])
                a = np.hstack((a_short, a_long, Lmax))
                em = np.hstack(
                    (em_short, em_long,
                     em_long[-1] + (Lmax - a_long[-1]) * dems[idx1 + idx2]))
                um = np.trapz(em, a)
                epsf01 = em_long + a_long * self.sorted_depsf[:idx1 + idx2]
                epsf02 = (self.w + um + self.sorted_depsf[idx1 + idx2:] / 2. *
                          (Lmin**2 + Lmax**2)) / (Lmin + Lmax)
                epsf0 = np.hstack((epsf01, epsf02))
            else:
                a_long = np.hstack((0.0, a1[:idx1], a2, Lmax))
                a = np.hstack((a_short, a_long[1:]))
                dems_long = dems
                em_long = np.hstack(
                    (np.cumsum(np.diff(a_long[:-1]) * dems_long)))
                em_long = np.hstack((em_long, em_long[-1]))
                em = np.hstack((em_short, em_long))
                epsf0 = em_long[:-1] + self.sorted_depsf * a_long[1:-1]
        elif a1[-1] <= Lmin:
            # double sided pullout
            a = np.hstack((-Lmin, -a1[::-1], 0.0, a1, Lmax))
            em1 = np.cumsum(np.diff(np.hstack((0.0, a1))) * dems)
            em = np.hstack((em1[-1], em1[::-1], 0.0, em1, em1[-1]))
            epsf0 = em1 + self.sorted_depsf * a1
        self._x_arr = a
        self._epsm_arr = em
        self._epsf0_arr = epsf0
        a_short = -a[a < 0.0][1:][::-1]
        if len(a_short) < len(self.sorted_depsf):
            a_short = np.hstack(
                (a_short,
                 Lmin * np.ones(len(self.sorted_depsf) - len(a_short))))
        a_long = a[a > 0.0][:-1]
        if len(a_long) < len(self.sorted_depsf):
            a_long = np.hstack(
                (a_long, Lmax * np.ones(len(self.sorted_depsf) - len(a_long))))
        return epsf0, a_short, a_long

    def damage_residuum(self, iter_damage):
        if np.any(iter_damage < 0.0) or np.any(iter_damage > 1.0):
            return np.ones_like(iter_damage) * 2.0
        else:
            Lmin = min(self.Ll, self.Lr)
            Lmax = max(self.Ll, self.Lr)
            epsf0, x_short, x_long = self.profile(iter_damage, Lmin, Lmax)
            residuum = self.vect_xi_cdf(epsf0, x_short=x_short,
                                        x_long=x_long) - iter_damage
            return residuum

    _x_arr = Array

    def __x_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    _epsm_arr = Array

    def __epsm_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    _epsf0_arr = Array

    def __epsf0_arr_default(self):
        return np.repeat(1e-10, len(self.sorted_depsf))

    damage = Property(depends_on='w, Ll, Lr, reinforcement_lst+')

    @cached_property
    def _get_damage(self):
        if self.w == 0.:
            damage = np.zeros_like(self.sorted_depsf)
        else:
            ff = t.clock()
            try:

                damage = root(self.damage_residuum,
                              np.ones_like(self.sorted_depsf) * 0.2,
                              method='excitingmixing',
                              options={'maxiter': 100})
                if np.any(damage.x < 0.0) or np.any(damage.x > 1.0):
                    raise ValueError
                damage = damage.x
                self.damage_initial_value = damage
            except Exception:
                print('fast opt method does not converge: '
                      'switched to a slower, robust method for this step')
                damage = root(self.damage_residuum,
                              np.ones_like(self.sorted_depsf) * 0.2,
                              method='krylov')
                damage = damage.x
            # print 'damage =', np.sum(damage) / len(damage), 'iteration time =', t.clock() - ff, 'sec'
        return damage
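# The damage evaluation above solves the fixed point omega = CDF(eps_f(omega))
# with a fast quasi-Newton solver and falls back to a slower, more robust one.
# A minimal sketch of that pattern with a toy residuum (the real residuum is
# assembled from profile() and vect_xi_cdf()):
import numpy as np
from scipy.optimize import root
from scipy.stats import weibull_min

def damage_residuum(omega):
    if np.any(omega < 0.0) or np.any(omega > 1.0):
        return np.ones_like(omega) * 2.0           # push the solver back into [0, 1]
    eps_f = 0.01 * (1.0 + omega)                   # toy fiber strain
    return weibull_min(5.0, scale=0.02).cdf(eps_f) - omega

omega0 = np.ones(10) * 0.2
try:
    sol = root(damage_residuum, omega0,
               method='excitingmixing', options={'maxiter': 100})
    if np.any(sol.x < 0.0) or np.any(sol.x > 1.0):
        raise ValueError
    damage = sol.x
except Exception:
    # robust fallback, mirroring the krylov branch above
    damage = root(damage_residuum, omega0, method='krylov').x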
Ejemplo n.º 18
class PDistrib(HasTraits):

    implements = IPDistrib

    def __init__(self, **kw):
        super(PDistrib, self).__init__(**kw)
        self.on_trait_change(self.refresh,
                             'distr_type.changed,quantile,n_segments')
        self.refresh()

    # puts all chosen continuous distributions defined in the
    # scipy.stats.distributions module as a list of strings
    # into the Enum trait
    # distr_choice = Enum(distr_enum)

    distr_choice = Enum('sin2x', 'weibull_min', 'sin_distr', 'uniform', 'norm',
                        'piecewise_uniform', 'gamma')
    distr_dict = {
        'sin2x': sin2x,
        'uniform': uniform,
        'norm': norm,
        'weibull_min': weibull_min,
        'sin_distr': sin_distr,
        'piecewise_uniform': piecewise_uniform,
        'gamma': gamma
    }

    # instantiating the continuous distributions
    distr_type = Property(Instance(Distribution), depends_on='distr_choice')

    @cached_property
    def _get_distr_type(self):
        return Distribution(self.distr_dict[self.distr_choice])

    # change monitor - accumulate the changes in a single event trait
    changed = Event

    @on_trait_change('distr_choice, distr_type.changed, quantile, n_segments')
    def _set_changed(self):
        self.changed = True

    #------------------------------------------------------------------------
    # Methods setting the statistical moments
    #------------------------------------------------------------------------
    mean = Property

    def _get_mean(self):
        return self.distr_type.mean

    def _set_mean(self, value):
        self.distr_type.mean = value

    variance = Property

    def _get_variance(self):
        return self.distr_type.variance

    def _set_variance(self, value):
        self.distr_type.variance = value

    #------------------------------------------------------------------------
    # Methods preparing visualization
    #------------------------------------------------------------------------

    quantile = Float(1e-14, auto_set=False, enter_set=True)
    range = Property(Tuple(Float), depends_on=\
                      'distr_type.changed, quantile')

    @cached_property
    def _get_range(self):
        return (self.distr_type.distr.ppf(self.quantile),
                self.distr_type.distr.ppf(1 - self.quantile))

    n_segments = Int(500, auto_set=False, enter_set=True)

    dx = Property(Float, depends_on=\
                      'distr_type.changed, quantile, n_segments')

    @cached_property
    def _get_dx(self):
        range_length = self.range[1] - self.range[0]
        return range_length / self.n_segments

    #-------------------------------------------------------------------------
    # Discretization of the distribution domain
    #-------------------------------------------------------------------------
    x_array = Property(Array('float_'), depends_on=\
                        'distr_type.changed,'\
                        'quantile, n_segments')

    @cached_property
    def _get_x_array(self):
        '''Get the intrinsic discretization of the distribution
        respecting its  bounds.
        '''
        return linspace(self.range[0], self.range[1], self.n_segments + 1)

    #===========================================================================
    # Access function to the scipy distribution
    #===========================================================================
    def pdf(self, x):
        return self.distr_type.distr.pdf(x)

    def cdf(self, x):
        return self.distr_type.distr.cdf(x)

    def rvs(self, n):
        return self.distr_type.distr.rvs(n)

    def ppf(self, e):
        return self.distr_type.distr.ppf(e)

    #===========================================================================
    # PDF - permanent array
    #===========================================================================

    pdf_array = Property(Array('float_'), depends_on=\
                                    'distr_type.changed,'\
                                     'quantile, n_segments')

    @cached_property
    def _get_pdf_array(self):
        '''Get pdf values in intrinsic positions'''
        return self.distr_type.distr.pdf(self.x_array)

    def get_pdf_array(self, x_array):
        '''Get pdf values in externally specified positions'''
        return self.distr_type.distr.pdf(x_array)

    #===========================================================================
    # CDF permanent array
    #===========================================================================
    cdf_array = Property(Array('float_'), depends_on=\
                                    'distr_type.changed,'\
                                     'quantile, n_segments')

    @cached_property
    def _get_cdf_array(self):
        '''Get cdf values in intrinsic positions'''
        return self.distr_type.distr.cdf(self.x_array)

    def get_cdf_array(self, x_array):
        '''Get cdf values in externally specified positions'''
        return self.distr_type.distr.cdf(x_array)

    #-------------------------------------------------------------------------
    # Randomization
    #-------------------------------------------------------------------------
    def get_rvs_array(self, n_samples):
        return self.distr_type.distr.rvs(n_samples)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        return figure

    data_changed = Event

    def plot(self, fig):
        figure = fig
        figure.clear()
        axes = figure.gca()
        # plot PDF
        axes.plot(self.x_array, self.pdf_array, lw=1.0, color='blue', \
                  label='PDF')
        axes2 = axes.twinx()
        # plot CDF on a separate axis (tick labels left)
        axes2.plot(self.x_array, self.cdf_array, lw=2, color='red', \
                  label='CDF')
        # fill the unity area given by integrating PDF along the X-axis
        axes.fill_between(self.x_array,
                          0,
                          self.pdf_array,
                          color='lightblue',
                          alpha=0.8,
                          linewidth=2)
        # plot mean
        mean = self.distr_type.distr.stats('m')
        axes.plot([mean, mean], [0.0, self.distr_type.distr.pdf(mean)],
                  lw=1.5,
                  color='black',
                  linestyle='-')
        # plot stdev
        stdev = sqrt(self.distr_type.distr.stats('v'))
        axes.plot([mean - stdev, mean - stdev],
                  [0.0, self.distr_type.distr.pdf(mean - stdev)],
                  lw=1.5,
                  color='black',
                  linestyle='--')
        axes.plot([mean + stdev, mean + stdev],
                  [0.0, self.distr_type.distr.pdf(mean + stdev)],
                  lw=1.5,
                  color='black',
                  linestyle='--')

        axes.legend(loc='center left')
        axes2.legend(loc='center right')
        axes.ticklabel_format(scilimits=(-3., 4.))
        axes2.ticklabel_format(scilimits=(-3., 4.))

        # plot limits on X and Y axes
        axes.set_ylim(0.0, max(self.pdf_array) * 1.15)
        axes2.set_ylim(0.0, 1.15)
        x_range = self.range[1] - self.range[0]
        axes.set_xlim(self.x_array[0] - 0.05 * x_range,
                      self.x_array[-1] + 0.05 * x_range)
        axes2.set_xlim(self.x_array[0] - 0.05 * x_range,
                       self.x_array[-1] + 0.05 * x_range)

    def refresh(self):
        self.plot(self.figure)
        self.data_changed = True

    icon = Property(Instance(ImageResource),
                    depends_on='distr_type.changed,quantile,n_segments')

    @cached_property
    def _get_icon(self):
        fig = plt.figure(figsize=(4, 4), facecolor='white')
        self.plot(fig)
        tf_handle, tf_name = tempfile.mkstemp('.png')
        fig.savefig(tf_name, dpi=35)
        return ImageResource(name=tf_name)

    traits_view = View(HSplit(VGroup(
        Group(
            Item('distr_choice', show_label=False),
            Item('@distr_type', show_label=False),
        ),
        id='pdistrib.distr_type.pltctrls',
        label='Distribution parameters',
        scrollable=True,
    ),
                              Tabbed(
                                  Group(
                                      Item('figure',
                                           editor=MPLFigureEditor(),
                                           show_label=False,
                                           resizable=True),
                                      scrollable=True,
                                      label='Plot',
                                  ),
                                  Group(Item('quantile', label='quantile'),
                                        Item('n_segments',
                                             label='plot points'),
                                        label='Plot parameters'),
                                  label='Plot',
                                  id='pdistrib.figure.params',
                                  dock='tab',
                              ),
                              dock='tab',
                              id='pdistrib.figure.view'),
                       id='pdistrib.view',
                       dock='tab',
                       title='Statistical distribution',
                       buttons=['Ok', 'Cancel'],
                       scrollable=True,
                       resizable=True,
                       width=600,
                       height=400)
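
# A minimal, self-contained sketch (an illustration added here, not part of the
# example above) of the same delegation pattern: pdf/cdf/ppf/rvs are simply
# forwarded to a frozen scipy.stats distribution, and the plot shows the PDF,
# the CDF on a twin axis and the mean +/- stdev markers. The choice of a normal
# distribution and its parameters are assumptions for illustration only.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

distr = norm(loc=10.0, scale=2.0)              # plays the role of distr_type.distr
x = np.linspace(distr.ppf(0.001), distr.ppf(0.999), 200)

fig, ax = plt.subplots()
ax.plot(x, distr.pdf(x), color='blue', lw=1.0, label='PDF')
ax.fill_between(x, 0.0, distr.pdf(x), color='lightblue', alpha=0.8)
ax2 = ax.twinx()                               # CDF on a secondary y-axis
ax2.plot(x, distr.cdf(x), color='red', lw=2, label='CDF')
mean, var = distr.stats('mv')
stdev = np.sqrt(var)
for xi, style in [(mean, '-'), (mean - stdev, '--'), (mean + stdev, '--')]:
    ax.plot([xi, xi], [0.0, distr.pdf(xi)], color='black', linestyle=style)
ax.legend(loc='center left')
ax2.legend(loc='center right')
plt.show()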
Example No. 19
class Randomization( HasTraits ):
    '''Multidimensional statistical integration.
    
    Its name SPIRRID is an acronym for 
    Set of Parallel Independent Random Responses with Identical Distributions
    
    The package implements the evaluation of an integral over a set of 
    random variables affecting a response function RF and distributed 
    according to a probabilistic distribution PDistrib.
    
    The input parameters are divided into four categories in order
    to define state consistency of the evaluation. The outputs
    are defined as cached properties that are reevaluated in response
    to changes in the inputs.

    The following events accumulate changes in the input parameters of spirrid:
    rf_change - change in the response function
    rand_change - change in the randomization
    conf_change - change in the configuration of the algorithm
    eps_change - change in the studied range of the process control variable       
    '''
    #--------------------------------------------------------------------
    # Response function 
    #--------------------------------------------------------------------
    #
    rf = Instance( IRF )
    def _rf_changed( self ):
        self.on_trait_change( self._set_rf_change, 'rf.changed' )

    #--------------------------------------------------------------------
    # Specification of random parameters 
    #--------------------------------------------------------------------
    # 
    rv_list = Property( List( RandomVariable ), depends_on = 'rf_change' )
    @cached_property
    def _get_rv_list( self ):
        rf = self.rf
        param_tuple = zip( rf.param_keys, rf.param_values, rf.param_traits )
        return [ RandomVariable( spirrid = self, rf = self.rf,
                     name = nm, trait_value = tv, source_trait = st )
                 for nm, tv, st in param_tuple ]

    # key-based access to the random variables
    rv_dict = Property( Dict, depends_on = 'rf_change' )
    @cached_property
    def _get_rv_dict( self ):
        d = {}
        for rv in self.rv_list:
            d[rv.name] = rv
        return d

    def set_random( self, variable, distribution = 'uniform', discr_type = 'T grid',
                    loc = 0., scale = 1., shape = 1., n_int = 30 ):
        '''Declare a variable as random 
        '''
        self.rv_dict[ variable ].set_random( distribution, discr_type,
                                             loc, scale, shape, n_int )

    def unset_random( self, variable ):
        '''Delete declaration of random variable
        '''
        self.rv_dict[ variable ].unset_random()

    def unset_all_random( self ):
        '''Set all variables to deterministic'''
        for rv in self.rv_list:
            rv.unset_random()

    n_rv = Property( depends_on = 'rand_change' )
    @cached_property
    def _get_n_rv( self ):
        return self.rv_random_keys.size

    # subsidiary methods for sorted access to the random variables.
    # (note dictionary has not defined order of its items)
    rv_random_keys = Property( List, depends_on = 'rf_change' )
    @cached_property
    def _get_rv_random_keys( self ):
        random_pattern = array( [ rv.random for rv in self.rv_list ], dtype = bool )
        ridx = where( random_pattern )[0]
        return self.rv_keys[ ridx ]

    # subsidiary methods for sorted access to the random variables.
    # (note dictionary has not defined order of its items)
    rv_keys = Property( List, depends_on = 'rf_change' )
    @cached_property
    def _get_rv_keys( self ):
        return array( self.rf.param_keys )

    #--------------------------------------------------------------------
    # Define which changes in the response function and in the 
    # statistical parameters are relevant for reevaluation of the response
    #--------------------------------------------------------------------
    rf_change = Event
    @on_trait_change( 'rf.changed' )
    def _set_rf_change( self ):
        self.rf_change = True

    rand_change = Event
    @on_trait_change( 'rv_list, rv_list.changed' )
    def _set_rand_change( self ):
        self.rand_change = True

    conf_change = Event
    @on_trait_change( '+alg_option' )
    def _set_conf_change( self ):
        self.conf_change = True

    eps_change = Event
    @on_trait_change( '+eps_range' )
    def _set_eps_change( self ):
        self.eps_change = True

    # List of discretized statistical domains
    # 
    theta_arr_list = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_theta_arr_list( self ):
        '''Get list of arrays with discretized RandomVariables.
        '''
        return [ rv.theta_arr for rv in self.rv_list ]

    # Discretized statistical domain
    # 
    theta_ogrid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_theta_ogrid( self ):
        '''Get orthogonal list of arrays with discretized RandomVariables.
        '''
        return orthogonalize( self.theta_arr_list )

    #---------------------------------------------------------------------------------
    # PDF * Theta arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    dG_ogrid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_dG_ogrid( self ):
        '''Get the orthogonal list of arrays with the PDF * Theta products.
        '''
        dG_arr_list = [ rv.dG_arr for rv in self.rv_list ]
        return orthogonalize( dG_arr_list )

    #---------------------------------------------------------------------------------
    # PDF grid - mutually multiplied arrays of PDF
    #---------------------------------------------------------------------------------
    dG_grid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_dG_grid( self ):
        if len( self.dG_ogrid ):
            return reduce( lambda x, y: x * y, self.dG_ogrid )
        else:
            return 1.0
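
# A small numpy sketch (assumptions only; 'orthogonalize_sketch' is a stand-in
# for the package's 'orthogonalize' helper, not its original code) of the idea
# behind dG_ogrid / dG_grid above: every random variable contributes a 1D array
# of integration weights, and reshaping them onto mutually orthogonal axes lets
# a single broadcasted product build the joint PDF grid.
import numpy as np
from functools import reduce

def orthogonalize_sketch(arr_list):
    '''Reshape each 1D array so that it spans its own axis of an n-dim grid.'''
    n = len(arr_list)
    return [a.reshape((1,) * i + (-1,) + (1,) * (n - i - 1))
            for i, a in enumerate(arr_list)]

dG_arr_list = [np.array([0.2, 0.5, 0.3]),        # weights of the first RV
               np.array([0.1, 0.4, 0.4, 0.1])]   # weights of the second RV
dG_ogrid = orthogonalize_sketch(dG_arr_list)     # shapes (3, 1) and (1, 4)
dG_grid = reduce(lambda x, y: x * y, dG_ogrid)   # shape (3, 4), sums to 1.0
print(dG_grid.shape, dG_grid.sum())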
Example No. 20
class ECBLCalibState(HasStrictTraits):

    # rupture moment and normal force measured in the calibration experiment
    # (three point bending test)
    #
    Mu = Float(3.5, enter_set = True, auto_set = False, input = True) # [kNm]
    Nu = Float(0.0, enter_set = True, auto_set = False, input = True) # [kN]

    #===========================================================================
    # Cross Section Specification (Geometry and Layout)
    #===========================================================================

    cs_geo = Instance(ECBCrossSectionGeo)
    def _cs_geo_default(self):
        return ECBCrossSectionGeo(notify_change = self.set_modified)

    cs_state = Property(Instance(ECBCrossSectionState), depends_on = 'cs_geo')
    @cached_property
    def _get_cs_state(self):
        return ECBCrossSectionState(cs_geo = self.cs_geo,
                                    notify_change = self.set_modified)

    notify_change = Callable(None)

    modified = Event
    @on_trait_change('+input')
    def set_modified(self):
        self.modified = True
        if self.notify_change != None:
            self.notify_change()

    u0 = Property(Array(float), depends_on = 'cs_geo.modified, cs_state.modified')
    @cached_property
    def _get_u0(self):
        u0 = self.cs_state.ecb_law.u0
        eps_up = -self.cs_geo.cc_law.eps_c_u
        self.cs_state.set(eps_up = eps_up)
        eps_lo = self.cs_state.convert_eps_tex_u_2_lo(u0[0])
        return np.array([eps_lo, u0[1] ], dtype = 'float')

    # iteration counter
    #
    n = Int(0)
    def get_lack_of_fit(self, u):
        '''Return the difference between 'N_external' and 'N_internal' as well as 'M_external' and 'M_internal'
        N_c (=compressive force of the compressive zone of the concrete)
        N_t (=total tensile force of the reinforcement layers)
        '''

        print '--------------------iteration', self.n, '------------------------'
        self.n += 1
        # set the strain state of the cross section for the current iterate
        #
        eps_up = -self.cs_geo.cc_law.eps_c_u
        eps_lo = u[0]
        self.cs_state.set(eps_lo = eps_lo, eps_up = eps_up)

        eps_tex_u = self.cs_state.convert_eps_lo_2_tex_u(u[0])

        self.cs_geo.ecb_law.set_cparams(eps_tex_u, u[1])

        N_internal = self.cs_state.N
        M_internal = self.cs_state.M

        d_N = N_internal - self.Nu
        d_M = M_internal - self.Mu

        return np.array([ d_M, d_N ], dtype = float)

    # solution vector returned by 'fit_response'
    #
    u_sol = Property(Array(Float), depends_on = 'modified')
    @cached_property
    def _get_u_sol(self):
        '''Iterate 'eps_t' such that the lack of fit between the calculated
        normal forces in the tensile reinforcement and the compressive zone (concrete)
        is smaller than the tolerance 'xtol' passed to the solver.
        NOTE: the method 'get_lack_of_fit' returns the residual vector [d_M, d_N].
        '''

        # use scipy functionality to get the iterated value of 'eps_t'
        # NOTE: 'fsolve' searches for a root of the two-component residual
        # returned by 'get_lack_of_fit'; 'xtol' sets the absolute tolerance
        # of the root search.
        #
        return fsolve(self.get_lack_of_fit, self.u0, xtol = 1.0e-5)

    #===========================================================================
    # Calibrated ecb_law_mfn
    #===========================================================================

    calibrated_ecb_law = Property(depends_on = 'modified')
    @cached_property
    def _get_calibrated_ecb_law(self):
        print 'NEW CALIBRATION'
        self.cs_geo.ecb_law.set_cparams(*self.u_sol)
        return self.cs_geo.ecb_law

    view = View(Item('Mu'),
                Item('Nu'),
                buttons = ['OK', 'Cancel']
                )
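
# A toy, self-contained sketch (an assumed stand-in, not the calibration model
# above) of the solution pattern used in '_get_u_sol': scipy.optimize.fsolve
# drives a two-component lack-of-fit vector towards zero, starting from an
# initial guess u0. The linear "internal forces" below are purely hypothetical.
import numpy as np
from scipy.optimize import fsolve

M_target, N_target = 3.5, 0.0    # "measured" values, analogous to Mu and Nu

def lack_of_fit(u):
    '''Toy internal moment and normal force as functions of the unknowns u.'''
    eps_lo, eps_t = u
    M_internal = 2.0 * eps_lo + 0.5 * eps_t
    N_internal = -1.0 * eps_lo + 3.0 * eps_t
    return np.array([M_internal - M_target, N_internal - N_target])

u0 = np.array([1.0, 1.0])
u_sol = fsolve(lack_of_fit, u0, xtol=1.0e-5)
print(u_sol, lack_of_fit(u_sol))   # the residual should be close to [0, 0]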
Example No. 21
class SimCrackLoc(IBVModel):
    '''Model assembling the components for studying the restrained crack localization.
    '''

    geo_transform = Instance(FlawCenteredGeoTransform)

    def _geo_transform_default(self):
        return FlawCenteredGeoTransform()

    shape = Int(10,
                desc='Number of finite elements',
                ps_levels=(10, 40, 4),
                input=True)

    length = Float(1000,
                   desc='Length of the simulated region',
                   unit='mm',
                   input=True)

    flaw_position = Float(500, input=True, unit='mm')

    flaw_radius = Float(100, input=True, unit='mm')

    reduction_factor = Float(0.9, input=True)

    elastic_fraction = Float(0.9, input=True)

    avg_radius = Float(400, input=True, unit='mm')

    # tensile strength of concrete
    f_m_t = Float(3.0, input=True, unit='MPa')

    epsilon_0 = Property(unit='-')

    def _get_epsilon_0(self):
        return self.f_m_t / self.E_m

    epsilon_f = Float(10, input=True, unit='-')

    h_m = Float(10, input=True, unit='mm')

    b_m = Float(8, input=True, unit='mm')

    A_m = Property(unit='m^2')

    def _get_A_m(self):
        return self.b_m * self.h_m

    E_m = Float(30.0e5, input=True, unit='MPa')

    E_f = Float(70.0e6, input=True, unit='MPa')

    A_f = Float(1.0, input=True, unit='mm^2')

    s_crit = Float(0.009, input=True, unit='mm')

    P_f = Property(depends_on='+input')

    @cached_property
    def _get_P_f(self):
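        # perimeter of a circular fiber with cross-sectional area A_f:
        # A_f = pi * r**2  =>  perimeter = 2 * pi * r = sqrt(4 * pi * A_f)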
        return sqrt(4 * self.A_f * pi)

    K_b = Property(depends_on='+input')

    @cached_property
    def _get_K_b(self):
        return self.T_max / self.s_crit

    tau_max = Float(0.0, input=True, unit='MPa')

    T_max = Property(depends_on='+input', unit='N/mm')

    @cached_property
    def _get_T_max(self):
        return self.tau_max * self.P_f

    rho = Property(depends_on='+input')

    @cached_property
    def _get_rho(self):
        return self.A_f / (self.A_f + self.A_m)

    #-------------------------------------------------------
    # Material model for the matrix
    #-------------------------------------------------------
    mats_m = Property(Instance(MATS1DDamageWithFlaw), depends_on='+input')

    @cached_property
    def _get_mats_m(self):
        mats_m = MATS1DDamageWithFlaw(E=self.E_m * self.A_m,
                                      flaw_position=self.flaw_position,
                                      flaw_radius=self.flaw_radius,
                                      reduction_factor=self.reduction_factor,
                                      epsilon_0=self.epsilon_0,
                                      epsilon_f=self.epsilon_f)
        return mats_m

    mats_f = Instance(MATS1DElastic)

    def _mats_f_default(self):
        mats_f = MATS1DElastic(E=self.E_f * self.A_f)
        return mats_f

    mats_b = Instance(MATS1DEval)

    def _mats_b_default(self):
        mats_b = MATS1DElastic(E=self.K_b)
        mats_b = MATS1DPlastic(E=self.K_b,
                               sigma_y=self.T_max,
                               K_bar=0.,
                               H_bar=0.)  # plastic function of slip
        return mats_b

    mats_fb = Property(Instance(MATS1D5Bond), depends_on='+input')

    @cached_property
    def _get_mats_fb(self):

        # Material model construction
        return MATS1D5Bond(
            mats_phase1=MATS1DElastic(E=0),
            mats_phase2=self.mats_f,
            mats_ifslip=self.mats_b,
            mats_ifopen=MATS1DElastic(
                E=0)  # elastic function of open - inactive
        )

    #-------------------------------------------------------
    # Finite element type
    #-------------------------------------------------------
    fets_m = Property(depends_on='+input')

    @cached_property
    def _get_fets_m(self):
        fets_eval = FETS1D2L(mats_eval=self.mats_m)
        #fets_eval = FETS1D2L3U( mats_eval = self.mats_m )
        return fets_eval

    fets_fb = Property(depends_on='+input')

    @cached_property
    def _get_fets_fb(self):
        return FETS1D52L4ULRH(mats_eval=self.mats_fb)
        #return FETS1D52L6ULRH( mats_eval = self.mats_fb )
        #return FETS1D52L8ULRH( mats_eval = self.mats_fb )

    #--------------------------------------------------------------------------------------
    # Mesh integrator
    #--------------------------------------------------------------------------------------
    fe_domain_structure = Property(depends_on='+input')

    @cached_property
    def _get_fe_domain_structure(self):
        '''Root of the domain hierarchy
        '''
        elem_length = self.length / float(self.shape)

        fe_domain = FEDomain()

        fe_m_level = FERefinementGrid(name='matrix domain',
                                      domain=fe_domain,
                                      fets_eval=self.fets_m)

        fe_grid_m = FEGrid(name='matrix grid',
                           coord_max=(self.length, ),
                           shape=(self.shape, ),
                           level=fe_m_level,
                           fets_eval=self.fets_m,
                           geo_transform=self.geo_transform)

        fe_fb_level = FERefinementGrid(name='fiber bond domain',
                                       domain=fe_domain,
                                       fets_eval=self.fets_fb)

        fe_grid_fb = FEGrid(coord_min=(0., self.length / 5.),
                            coord_max=(self.length, 0.),
                            shape=(self.shape, 1),
                            level=fe_fb_level,
                            fets_eval=self.fets_fb,
                            geo_transform=self.geo_transform)

        return fe_domain, fe_grid_m, fe_grid_fb, fe_m_level, fe_fb_level

    fe_domain = Property

    def _get_fe_domain(self):
        return self.fe_domain_structure[0]

    fe_grid_m = Property

    def _get_fe_grid_m(self):
        return self.fe_domain_structure[1]

    fe_grid_fb = Property

    def _get_fe_grid_fb(self):
        return self.fe_domain_structure[2]

    fe_m_level = Property

    def _get_fe_m_level(self):
        return self.fe_domain_structure[3]

    fe_fb_level = Property

    def _get_fe_fb_level(self):
        return self.fe_domain_structure[4]

    #---------------------------------------------------------------------------
    # Load scaling adapted to the elastic and inelastic regime
    #---------------------------------------------------------------------------
    final_displ = Property(depends_on='+input')

    @cached_property
    def _get_final_displ(self):
        damage_onset_displ = self.mats_m.epsilon_0 * self.length
        return damage_onset_displ / self.elastic_fraction

    step_size = Property(depends_on='+input')

    @cached_property
    def _get_step_size(self):
        n_steps = self.n_steps
        return 1.0 / float(n_steps)

    time_function = Property(depends_on='+input')

    @cached_property
    def _get_time_function(self):
        '''Get the time function so that the elastic regime 
        is skipped in a single step.
        '''
        step_size = self.step_size

        elastic_value = self.elastic_fraction * 0.98 * self.reduction_factor
        inelastic_value = 1.0 - elastic_value

        def ls(t):
            if t <= step_size:
                return (elastic_value / step_size) * t
            else:
                return elastic_value + (t - step_size) * (inelastic_value) / (
                    1 - step_size)

        return ls
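
    # Worked check of the time function above (an added note, not from the
    # original source): with step_size = 1 / n_steps,
    #   ls(step_size) = (elastic_value / step_size) * step_size = elastic_value
    #   ls(1.0)       = elastic_value
    #                   + (1 - step_size) * inelastic_value / (1 - step_size)
    #                 = elastic_value + inelastic_value = 1.0
    # i.e. the elastic regime is traversed in the first step and the remaining
    # steps resolve the inelastic branch.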

    def plot_time_function(self, p):
        '''Plot the time function.
        '''
        n_steps = self.n_steps
        mats = self.mats_m
        step_size = self.step_size

        ls_t = linspace(0, step_size * n_steps, n_steps + 1)
        ls_fn = frompyfunc(self.time_function, 1, 1)
        ls_v = ls_fn(ls_t)

        p.subplot(321)
        p.plot(ls_t, ls_v, 'ro-')

        final_epsilon = self.final_displ / self.length

        kappa = linspace(mats.epsilon_0, final_epsilon, 10)
        omega_fn = frompyfunc(lambda kappa: mats._get_omega(None, kappa), 1, 1)
        omega = omega_fn(kappa)
        kappa_scaled = (step_size + (1 - step_size) *
                        (kappa - mats.epsilon_0) /
                        (final_epsilon - mats.epsilon_0))
        xdata = hstack([array([0.0], dtype=float), kappa_scaled])
        ydata = hstack([array([0.0], dtype=float), omega])
        p.plot(xdata, ydata, 'g')
        p.xlabel('regular time [-]')
        p.ylabel('scaled time [-]')

    run = Button

    @on_trait_change('run')
    def peval(self):
        '''Evaluation procedure.
        '''
        #mv = MATS1DDamageView( model = mats_eval )
        #mv.configure_traits()

        right_dof_m = self.fe_grid_m[-1, -1].dofs[0, 0, 0]

        right_dof_fb = self.fe_grid_fb[-1, -1, -1, -1].dofs[0, 0, 0]
        # Response tracers
        A = self.A_m + self.A_f
        self.sig_eps_m = RTraceGraph(name='F_u_m',
                                     var_y='F_int',
                                     idx_y=right_dof_m,
                                     var_x='U_k',
                                     idx_x=right_dof_m,
                                     transform_y='y / %g' % A)

        # Response tracers
        self.sig_eps_f = RTraceGraph(name='F_u_f',
                                     var_y='F_int',
                                     idx_y=right_dof_fb,
                                     var_x='U_k',
                                     idx_x=right_dof_fb,
                                     transform_y='y / %g' % A)

        self.eps_m_field = RTraceDomainListField(name='eps_m',
                                                 position='int_pnts',
                                                 var='eps_app',
                                                 warp=False)

        self.eps_f_field = RTraceDomainListField(name='eps_f',
                                                 position='int_pnts',
                                                 var='mats_phase2_eps_app',
                                                 warp=False)
        # Response tracers
        self.sig_m_field = RTraceDomainListField(name='sig_m',
                                                 position='int_pnts',
                                                 var='sig_app')

        self.sig_f_field = RTraceDomainListField(name='sig_f',
                                                 position='int_pnts',
                                                 var='mats_phase2_sig_app')

        self.omega_m_field = RTraceDomainListField(name='omega_m',
                                                   position='int_pnts',
                                                   var='omega',
                                                   warp=False)

        self.shear_flow_field = RTraceDomainListField(name='shear flow',
                                                      position='int_pnts',
                                                      var='shear_flow',
                                                      warp=False)

        self.slip_field = RTraceDomainListField(name='slip',
                                                position='int_pnts',
                                                var='slip',
                                                warp=False)

        avg_processor = None
        if self.avg_radius > 0.0:
            n_dofs = self.fe_domain.n_dofs
            avg_processor = RTUAvg(sd=self.fe_m_level,
                                   n_dofs=n_dofs,
                                   avg_fn=QuarticAF(radius=self.avg_radius))

        ts = TStepper(
            u_processor=avg_processor,
            dof_resultants=True,
            sdomain=self.fe_domain,
            bcond_list=[  # define the left clamping 
                BCSlice(var='u',
                        value=0.,
                        dims=[0],
                        slice=self.fe_grid_fb[0, 0, 0, :]),
                #                                BCSlice( var = 'u', value = 0., dims = [0], slice = self.fe_grid_m[ 0, 0 ] ),
                # loading at the right edge
                #                                 BCSlice( var = 'f', value = 1, dims = [0], slice = domain[-1, -1, -1, 0],
                #                                         time_function = ls ),
                BCSlice(var='u',
                        value=self.final_displ,
                        dims=[0],
                        slice=self.fe_grid_fb[-1, -1, -1, :],
                        time_function=self.time_function),
                #                                 BCSlice( var = 'u', value = self.final_displ, dims = [0], slice = self.fe_grid_m[-1, -1],
                #                                         time_function = self.time_function ),
                # fix horizontal displacement in the top layer
                #                                 BCSlice( var = 'u', value = 0., dims = [0], slice = domain[:, -1, :, -1] ),
                # fix the vertical displacement all over the domain
                BCSlice(var='u',
                        value=0.,
                        dims=[1],
                        slice=self.fe_grid_fb[:, :, :, :]),
                #                            # Connect bond and matrix domains
                BCDofGroup(var='u',
                           value=0.,
                           dims=[0],
                           get_link_dof_method=self.fe_grid_fb.get_bottom_dofs,
                           get_dof_method=self.fe_grid_m.get_all_dofs,
                           link_coeffs=[1.])
            ],
            rtrace_list=[
                self.sig_eps_m,
                self.sig_eps_f,
                self.eps_m_field,
                self.eps_f_field,
                self.sig_m_field,
                self.sig_f_field,
                self.omega_m_field,
                self.shear_flow_field,
                self.slip_field,
            ])

        # Add the time-loop control
        tloop = TLoop(tstepper=ts,
                      KMAX=300,
                      tolerance=1e-5,
                      debug=False,
                      verbose_iteration=True,
                      verbose_time=False,
                      tline=TLine(min=0.0, step=self.step_size, max=1.0))

        tloop.on_accept_time_step = self.plot

        U = tloop.eval()

        self.sig_eps_f.refresh()
        max_sig_m = max(self.sig_eps_m.trace.ydata)
        return array([U[right_dof_m], max_sig_m], dtype='float_')

    #--------------------------------------------------------------------------------------
    # Tracers
    #--------------------------------------------------------------------------------------

    def plot_sig_eps(self, p):
        p.set_xlabel('control displacement [mm]')
        p.set_ylabel('stress [MPa]')

        self.sig_eps_m.refresh()
        self.sig_eps_m.trace.plot(p, 'o-')

        self.sig_eps_f.refresh()
        self.sig_eps_f.trace.plot(p, 'o-')

        p.plot(self.sig_eps_m.trace.xdata,
               self.sig_eps_m.trace.ydata + self.sig_eps_f.trace.ydata, 'o-')

    def plot_eps(self, p):
        eps_m = self.eps_m_field.subfields[0]
        xdata = eps_m.vtk_X[:, 0]
        ydata = eps_m.field_arr[:, 0, 0]
        idata = argsort(xdata)
        p.plot(xdata[idata], ydata[idata], 'o-')

        eps_f = self.eps_f_field.subfields[1]
        xdata = eps_f.vtk_X[:, 0]
        ydata = eps_f.field_arr[:, 0, 0]
        idata = argsort(xdata)
        p.plot(xdata[idata], ydata[idata], 'o-')

        p.set_ylim(ymin=0)
        p.set_xlabel('bar axis [mm]')
        p.set_ylabel('strain [-]')

    def plot_omega(self, p):
        omega_m = self.omega_m_field.subfields[0]
        xdata = omega_m.vtk_X[:, 0]
        ydata = omega_m.field_arr[:]
        idata = argsort(xdata)
        p.fill(xdata[idata], ydata[idata], facecolor='gray', alpha=0.2)

        print 'max omega', max(ydata[idata])

        p.set_ylim(ymin=0, ymax=1.0)
        p.set_xlabel('bar axis [mm]')
        p.set_ylabel('omega [-]')

    def plot_sig(self, p):
        sig_m = self.sig_m_field.subfields[0]
        xdata = sig_m.vtk_X[:, 0]
        ydata = sig_m.field_arr[:, 0, 0]
        idata = argsort(xdata)
        ymax = max(ydata)
        p.plot(xdata[idata], ydata[idata], 'o-')

        sig_f = self.sig_f_field.subfields[1]
        xdata = sig_f.vtk_X[:, 0]
        ydata = sig_f.field_arr[:, 0, 0]
        idata = argsort(xdata)
        p.plot(xdata[idata], ydata[idata], 'o-')

        xdata = sig_f.vtk_X[:, 0]
        ydata = sig_f.field_arr[:, 0, 0] + sig_m.field_arr[:, 0, 0]
        p.plot(xdata[idata], ydata[idata], 'ro-')

        p.set_ylim(ymin=0)  # , ymax = 1.2 * ymax )
        p.set_xlabel('bar axis [mm]')
        p.set_ylabel('stress [MPa]')

    def plot_shear_flow(self, p):
        shear_flow = self.shear_flow_field.subfields[1]
        xdata = shear_flow.vtk_X[:, 0]
        ydata = shear_flow.field_arr[:, 0] / self.P_f
        idata = argsort(xdata)
        ymax = max(ydata)
        p.plot(xdata[idata], ydata[idata], 'o-')

        p.set_xlabel('bar axis [mm]')
        p.set_ylabel('shear flow [N/m]')

    def plot_slip(self, p):
        slip = self.slip_field.subfields[1]
        xdata = slip.vtk_X[:, 0]
        ydata = slip.field_arr[:, 0]
        idata = argsort(xdata)
        ymax = max(ydata)
        p.plot(xdata[idata], ydata[idata], 'ro-')

        p.set_xlabel('bar axis [mm]')
        p.set_ylabel('slip [mm]')

    def plot_tracers(self, p=p):

        p.subplot(221)
        self.plot_sig_eps(p)

        p.subplot(223)
        self.plot_eps(p)

        p.subplot(224)
        self.plot_sig(p)

    #---------------------------------------------------------------
    # PLOT OBJECT
    #-------------------------------------------------------------------
    figure_ld = Instance(Figure)

    def _figure_ld_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.12, 0.13, 0.85, 0.74])
        return figure

    #---------------------------------------------------------------
    # PLOT OBJECT
    #-------------------------------------------------------------------
    figure_eps = Instance(Figure)

    def _figure_eps_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.12, 0.13, 0.85, 0.74])
        return figure

    #---------------------------------------------------------------
    # PLOT OBJECT
    #-------------------------------------------------------------------
    figure_shear_flow = Instance(Figure)

    def _figure_shear_flow_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.12, 0.13, 0.85, 0.74])
        return figure

    #---------------------------------------------------------------
    # PLOT OBJECT
    #-------------------------------------------------------------------
    figure_sig = Instance(Figure)

    def _figure_sig_default(self):
        figure = Figure(facecolor='white')
        figure.add_axes([0.12, 0.13, 0.85, 0.74])
        return figure

    def plot(self):

        self.figure_ld.clear()
        ax = self.figure_ld.gca()
        self.plot_sig_eps(ax)

        self.figure_eps.clear()
        ax = self.figure_eps.gca()
        self.plot_eps(ax)
        ax2 = ax.twinx()
        self.plot_omega(ax2)

        self.figure_sig.clear()
        ax = self.figure_sig.gca()
        self.plot_sig(ax)
        ax2 = ax.twinx()
        self.plot_omega(ax2)

        self.figure_shear_flow.clear()
        ax = self.figure_shear_flow.gca()
        self.plot_shear_flow(ax)
        ax2 = ax.twinx()
        self.plot_slip(ax2)

        self.data_changed = True

    def get_sim_outputs(self):
        '''
        Specifies the results and their order returned by the model
        evaluation.
        '''
        return [
            SimOut(name='right end displacement', unit='m'),
            SimOut(name='peak load', unit='MPa')
        ]

    data_changed = Event

    toolbar = ToolBar(Action(name="Run",
                             tooltip='Start computation',
                             image=ImageResource('kt-start'),
                             action="start_study"),
                      Action(name="Pause",
                             tooltip='Pause computation',
                             image=ImageResource('kt-pause'),
                             action="pause_study"),
                      Action(name="Stop",
                             tooltip='Stop computation',
                             image=ImageResource('kt-stop'),
                             action="stop_study"),
                      image_size=(32, 32),
                      show_tool_names=False,
                      show_divider=True,
                      name='view_toolbar')

    traits_view = View(HSplit(
        VSplit(
            Item('run', show_label=False),
            VGroup(
                Item('shape'),
                Item('n_steps'),
                Item('length'),
                label='parameters',
                id='crackloc.viewmodel.factor.geometry',
                dock='tab',
                scrollable=True,
            ),
            VGroup(Item('E_m'),
                   Item('f_m_t'),
                   Item('avg_radius'),
                   Item('h_m'),
                   Item('b_m'),
                   Item('A_m', style='readonly'),
                   Item('mats_m', show_label=False),
                   label='Matrix',
                   dock='tab',
                   id='crackloc.viewmodel.factor.matrix',
                   scrollable=True),
            VGroup(Item('E_f'),
                   Item('A_f'),
                   Item('mats_f', show_label=False),
                   label='Fiber',
                   dock='tab',
                   id='crackloc.viewmodel.factor.fiber',
                   scrollable=True),
            VGroup(Group(
                Item('tau_max'),
                Item('s_crit'),
                Item('P_f', style='readonly', show_label=True),
                Item('K_b', style='readonly', show_label=True),
                Item('T_max', style='readonly', show_label=True),
            ),
                   Item('mats_b', show_label=False),
                   label='Bond',
                   dock='tab',
                   id='crackloc.viewmodel.factor.bond',
                   scrollable=True),
            VGroup(Item('rho', style='readonly', show_label=True),
                   label='Composite',
                   dock='tab',
                   id='crackloc.viewmodel.factor.jcomposite',
                   scrollable=True),
            id='crackloc.viewmodel.left',
            label='studied factors',
            layout='tabbed',
            dock='tab',
        ),
        VSplit(
            VGroup(
                Item('figure_ld',
                     editor=MPLFigureEditor(),
                     resizable=True,
                     show_label=False),
                label='stress-strain',
                id='crackloc.viewmode.figure_ld_window',
                dock='tab',
            ),
            VGroup(
                Item('figure_eps',
                     editor=MPLFigureEditor(),
                     resizable=True,
                     show_label=False),
                label='strains profile',
                id='crackloc.viewmode.figure_eps_window',
                dock='tab',
            ),
            VGroup(
                Item('figure_sig',
                     editor=MPLFigureEditor(),
                     resizable=True,
                     show_label=False),
                label='stress profile',
                id='crackloc.viewmode.figure_sig_window',
                dock='tab',
            ),
            VGroup(
                Item('figure_shear_flow',
                     editor=MPLFigureEditor(),
                     resizable=True,
                     show_label=False),
                label='bond shear and slip profiles',
                id='crackloc.viewmode.figure_shear_flow_window',
                dock='tab',
            ),
            id='crackloc.viewmodel.right',
        ),
        id='crackloc.viewmodel.splitter',
    ),
                       title='SimVisage Component: Crack localization',
                       id='crackloc.viewmodel',
                       dock='tab',
                       resizable=True,
                       height=0.8,
                       width=0.8,
                       buttons=[OKButton])
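
# A small numpy sketch (an illustration of the bond parameters above, not of
# the MATS1DPlastic implementation) of the elastic - perfectly plastic bond law
# implied by K_b = T_max / s_crit: the shear flow grows linearly with the slip
# up to s_crit and stays at T_max = tau_max * P_f afterwards. The value of
# tau_max is an assumption made here for illustration.
import numpy as np

A_f = 1.0                                 # fiber cross section [mm^2]
tau_max = 0.5                             # assumed bond strength [MPa]
P_f = np.sqrt(4.0 * np.pi * A_f)          # fiber perimeter [mm]
T_max = tau_max * P_f                     # shear flow at yielding [N/mm]
s_crit = 0.009                            # slip at yielding [mm]
K_b = T_max / s_crit                      # initial bond stiffness [N/mm^2]

s = np.linspace(0.0, 3.0 * s_crit, 100)   # slip range [mm]
T = np.minimum(K_b * s, T_max)            # bond shear flow [N/mm]
print(T[0], T[-1], T_max)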
Example No. 22
class RepresentativeCB(HasTraits):
    CB_model = Instance(CompositeCrackBridge)
    n_w = Int
    n_BC = Int
    load_sigma_c_arr = Array
    length = Float
    CB_model_view = Property(Instance(CompositeCrackBridgeView),
                             depends_on='CB_model')

    @cached_property
    def _get_CB_model_view(self):
        return CompositeCrackBridgeView(model=self.CB_model)

    def max_sigma_w(self, Ll, Lr):
        self.CB_model_view.model.Ll = Ll
        self.CB_model_view.model.Lr = Lr
        max_sigma_c, max_w = self.CB_model_view.sigma_c_max
        if max_sigma_c < self.load_sigma_c_arr[-1]:
            return max_sigma_c, max_w
        else:
            self.CB_model_view.apply_load(self.load_sigma_c_arr[-1] - 1e-10)
            return self.load_sigma_c_arr[-1] - 1e-10, self.CB_model_view.model.w

    BC_range = Property(depends_on='n_BC, CB_model')

    @cached_property
    def _get_BC_range(self):
        self.max_sigma_w(1e5, 1e5)
        Lmax = min(self.CB_model_view.x_arr[-2], self.length)
        bc_range = np.logspace(np.log10(1.0), np.log10(Lmax), self.n_BC)
        return bc_range

    def w_x_res(self, w_arr, ll, lr):
        self.CB_model_view.model.Ll = ll
        self.CB_model_view.model.Lr = lr
        cb_epsm_interpolators_lst = [
            MFnLineArray(xdata=np.linspace(-1e5, 1e5, 5), ydata=np.zeros(5))
        ]
        cb_epsf_interpolators_lst = [
            MFnLineArray(xdata=np.linspace(-1e5, 1e5, 5), ydata=np.zeros(5))
        ]
        sigma_c_lst = [0.0]
        w_lst = [0.0]
        for w in w_arr:
            self.CB_model_view.model.w = w
            if self.CB_model_view.sigma_c > sigma_c_lst[-1]:
                w_lst.append(w)
                sigma_c_lst.append(self.CB_model_view.sigma_c)
                x_i = np.hstack((-self.length - 1e-1, self.CB_model_view.x_arr,
                                 self.length + 1e-1))
                epsm_i = np.hstack((self.CB_model_view.epsm_arr[0],
                                    self.CB_model_view.epsm_arr,
                                    self.CB_model_view.epsm_arr[-1]))
                epsf_i = np.hstack((self.CB_model_view.epsf_arr[0],
                                    self.CB_model_view.epsf_arr,
                                    self.CB_model_view.epsf_arr[-1]))
                cb_epsm_interpolators_lst.append(
                    MFnLineArray(xdata=x_i, ydata=epsm_i))
                cb_epsf_interpolators_lst.append(
                    MFnLineArray(xdata=x_i, ydata=epsf_i))
        w_interpolator = MFnLineArray(xdata=np.array(sigma_c_lst),
                                      ydata=np.array(w_lst))
        return w_interpolator, [sigma_c_lst, cb_epsm_interpolators_lst
                                ], [sigma_c_lst, cb_epsf_interpolators_lst]

    interpolator_lists = Property(
        Array, depends_on='CB_model, load_sigma_c_arr, n_w, n_x, n_BC')

    @cached_property
    def _get_interpolator_lists(self):
        epsm_interpolators = np.zeros((self.n_BC, self.n_BC), dtype=object)
        epsf_interpolators = np.zeros((self.n_BC, self.n_BC), dtype=object)
        w_interpolators = np.zeros((self.n_BC, self.n_BC), dtype=object)
        loops_tot = self.n_BC**2
        max_sigma_c_arr = np.zeros((self.n_BC, self.n_BC))
        for i, ll in enumerate(self.BC_range):
            for j, lr in enumerate(self.BC_range):
                if j >= i:
                    # find maximum
                    sigma_c_max, wmax = self.max_sigma_w(ll, lr)
                    # print 'Ll, Lr, sigmacmax: ', ll, lr, sigma_c_max
                    max_sigma_c_arr[i, j] = max_sigma_c_arr[j, i] = sigma_c_max
                    w_arr0 = np.linspace(1e-10, wmax, self.n_w)
                    w_interpolator, epsm_interp_lst, epsf_interp_lst = self.w_x_res(
                        w_arr0, ll, lr)
                    epsm_interpolators[i, j] = epsm_interpolators[
                        j, i] = epsm_interp_lst
                    epsf_interpolators[i, j] = epsf_interpolators[
                        j, i] = epsf_interp_lst
                    w_interpolators[i, j] = w_interpolators[j,
                                                            i] = w_interpolator
                current_loop = i * len(self.BC_range) + j + 1
                print 'progress: %2.1f %%' % \
                    (current_loop / float(loops_tot) * 100.)
        interp_max_sigma_c = interp2d(self.BC_range,
                                      self.BC_range,
                                      max_sigma_c_arr,
                                      fill_value=None)
        return interp_max_sigma_c, epsm_interpolators, w_interpolators, epsf_interpolators

    def get_BC_idxs(self, Ll, Lr):
        if Ll >= self.BC_range[-1]:
            ll_idx_high = -1
            ll_idx_low = -1
        elif Ll <= self.BC_range[0]:
            ll_idx_high = 0
            ll_idx_low = 0
        else:
            ll_idx_high = np.argwhere(Ll <= self.BC_range)[0][0]
            ll_idx_low = np.argwhere(Ll >= self.BC_range)[-1][0]
        if Lr > self.BC_range[-1]:
            lr_idx_high = -1
            lr_idx_low = -1
        elif Lr <= self.BC_range[0]:
            lr_idx_high = 0
            lr_idx_low = 0
        else:
            lr_idx_high = np.argwhere(Lr <= self.BC_range)[0][0]
            lr_idx_low = np.argwhere(Lr >= self.BC_range)[-1][0]
        return ll_idx_high, lr_idx_high, ll_idx_low, lr_idx_low

    def interpolate_max_sigma_c(self, Ll, Lr):
        return self.interpolator_lists[0](Ll, Lr)

    def interpolate_epsm(self, Ll, Lr, sigma_c, x_arr):
        ll_idx_high, lr_idx_high, ll_idx_low, lr_idx_low = self.get_BC_idxs(
            Ll, Lr)
        epsm_interpolator_lst = self.interpolator_lists[1][ll_idx_high,
                                                           lr_idx_high]
        sigc = np.array(epsm_interpolator_lst[0])
        if sigma_c > sigc[-1]:
            # applied stress is higher than crack bridge strength
            return np.repeat(np.nan, len(x_arr))
        else:
            sigc_high = np.argwhere(sigc > sigma_c)[0][0]
            sigc_low = np.argwhere(sigc < sigma_c)[-1][0]
            coeff_low = (sigc[sigc_high] - sigma_c) / \
                (sigc[sigc_high] - sigc[sigc_low])
            coeff_high = (sigma_c - sigc[sigc_low]) / (sigc[sigc_high] -
                                                       sigc[sigc_low])
            if Lr >= Ll:
                epsm = epsm_interpolator_lst[1][sigc_low].get_values(x_arr) * coeff_low + \
                    epsm_interpolator_lst[1][
                        sigc_high].get_values(x_arr) * coeff_high
            else:
                epsm = epsm_interpolator_lst[1][sigc_low].get_values(-x_arr[::-1]) * coeff_low + \
                    epsm_interpolator_lst[1][
                        sigc_high].get_values(-x_arr[::-1]) * coeff_high
                epsm = epsm[::-1]
            return epsm

    def interpolate_epsf(self, Ll, Lr, sigma_c, x_arr):
        ll_idx_high, lr_idx_high, ll_idx_low, lr_idx_low = self.get_BC_idxs(
            Ll, Lr)
        epsf_interpolator_lst = self.interpolator_lists[3][ll_idx_high,
                                                           lr_idx_high]
        sigc = np.array(epsf_interpolator_lst[0])
        if sigma_c > sigc[-1]:
            # applied stress is higher than crack bridge strength
            return np.repeat(np.nan, len(x_arr))
        else:
            sigc_high = np.argwhere(sigc > sigma_c)[0][0]
            sigc_low = np.argwhere(sigc < sigma_c)[-1][0]
            coeff_low = (sigc[sigc_high] - sigma_c) / \
                (sigc[sigc_high] - sigc[sigc_low])
            coeff_high = (sigma_c - sigc[sigc_low]) / (sigc[sigc_high] -
                                                       sigc[sigc_low])
            if Lr >= Ll:
                epsf = epsf_interpolator_lst[1][sigc_low].get_values(x_arr) * coeff_low + \
                    epsf_interpolator_lst[1][
                        sigc_high].get_values(x_arr) * coeff_high
            else:
                epsf = epsf_interpolator_lst[1][sigc_low].get_values(-x_arr[::-1]) * coeff_low + \
                    epsf_interpolator_lst[1][
                        sigc_high].get_values(-x_arr[::-1]) * coeff_high
                epsf = epsf[::-1]
            return epsf

    def interpolate_w(self, Ll, Lr, sigma_c):
        '''Interpolate w between the precomputed boundary conditions using the
        shape functions of a 4-node rectangular finite element.
        '''
        ll_idx_high, lr_idx_high, ll_idx_low, lr_idx_low = self.get_BC_idxs(
            Ll, Lr)
        ll_low, ll_high, lr_low, lr_high = self.BC_range[
            ll_idx_low], self.BC_range[ll_idx_high], self.BC_range[
                lr_idx_low], self.BC_range[lr_idx_high]
        # evaluating nodal values / note that w2 = w3
        w1 = self.interpolator_lists[2][ll_idx_low, lr_idx_low].get_values(
            np.array([sigma_c]))
        w2 = self.interpolator_lists[2][ll_idx_low, lr_idx_high].get_values(
            np.array([sigma_c]))
        w3 = self.interpolator_lists[2][ll_idx_high, lr_idx_low].get_values(
            np.array([sigma_c]))
        w4 = self.interpolator_lists[2][ll_idx_high, lr_idx_high].get_values(
            np.array([sigma_c]))
        nodal_values = [w1, w2, w3, w4]
        # shape functions
        if ll_idx_low == ll_idx_high:
            if lr_idx_low == lr_idx_high:
                # no interpolation
                return w1
            else:
                # 1D interpolation
                a = lr_high - lr_low
                N1 = lambda x: -(x - lr_high) / a
                N2 = lambda x: (x - lr_low) / a
                return w1 * N1(Lr) + w2 * N2(Lr)
        else:
            if lr_idx_low == lr_idx_high:
                # 1D interpolation
                a = ll_high - ll_low
                N1 = lambda x: -(x - ll_high) / a
                N2 = lambda x: (x - ll_low) / a
                return w2 * N1(Ll) + w3 * N2(Ll)
            else:
                # 2D interpolation
                ab = (ll_high - ll_low) * (lr_high - lr_low)
                N1 = lambda x, y: (x - ll_high) * (y - lr_high) / ab
                N2 = lambda x, y: -(x - ll_low) * (y - lr_high) / ab
                N3 = lambda x, y: (x - ll_low) * (y - lr_low) / ab
                N4 = lambda x, y: -(x - ll_high) * (y - lr_low) / ab
                shape_functions = [N1, N2, N3, N4]
                # interpolate w
                w_interpolated = 0.0
                for i, Ni in enumerate(shape_functions):
                    w_interpolated += nodal_values[i] * Ni(Ll, Lr)
                return w_interpolated
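
# A compact sketch (generic bilinear interpolation with assumed nodal values,
# not the interpolator tables built above) of the 4-node shape-function
# interpolation used in 'interpolate_w': on the rectangle
# [x_low, x_high] x [y_low, y_high] the four shape functions sum to one and
# weight the nodal values.
import numpy as np

x_low, x_high = 10.0, 20.0
y_low, y_high = 5.0, 15.0
ab = (x_high - x_low) * (y_high - y_low)

# nodal values at (x_low, y_low), (x_high, y_low), (x_low, y_high), (x_high, y_high)
w_ll, w_hl, w_lh, w_hh = 0.1, 0.2, 0.3, 0.4

def interp_w(x, y):
    N_ll = (x_high - x) * (y_high - y) / ab
    N_hl = (x - x_low) * (y_high - y) / ab
    N_lh = (x_high - x) * (y - y_low) / ab
    N_hh = (x - x_low) * (y - y_low) / ab
    return w_ll * N_ll + w_hl * N_hl + w_lh * N_lh + w_hh * N_hh

print(interp_w(10.0, 5.0), interp_w(20.0, 15.0), interp_w(15.0, 10.0))
# -> 0.1 at the first node, 0.4 at the last node, 0.25 at the midpoint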
Example No. 23
class YMBCrossCorrel(HasTraits):

    data = Instance(IYMBData)

    # convert the dictionary keys to an ordered list.
    var_name_list = Property(List)

    @cached_property
    def _get_var_name_list(self):
        return sorted(var_dict.keys())

    # list of data arrays in the order of the var_name_list
    var_arr_list = Property(List, depends_on='data.input_change')

    @cached_property
    def _get_var_arr_list(self):
        return [
            getattr(self.data, var_dict[var_name]).flatten()[:, None]
            for var_name in self.var_name_list
        ]

    corr_arr = Property(Array, depends_on='data.input_change')

    @cached_property
    def _get_corr_arr(self):
        print 'redrawing cross correl'
        # get the list of names and sort them alphabetically
        corr_data = ma.hstack(self.var_arr_list)
        # @kelidas: ma.corrcoef and numpy.corrcoef give slightly different results
        # return ma.corrcoef( corr_data, rowvar = False, allow_masked = True )
        return MatSpearman(corr_data)

    figure = Instance(Figure)

    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    data_changed = Event(True)

    @on_trait_change('data, data.input_change')
    def _redraw(self):
        figure = self.figure
        figure.clear()
        var_data = self.corr_arr

        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        axes = figure.axes[0]
        axes.clear()
        x_coor = arange(var_data.shape[1])
        axes.grid()
        for i in range(0, var_data.shape[1]):
            axes.plot(x_coor[i:] - x_coor[i], var_data[i, (i):], '-x')
        axes.set_xlabel(r'$\mathrm{x}\, [\mu\mathrm{m}]$', fontsize=16)
        axes.set_ylabel(r'$\mathrm{correlation}$', fontsize=16)
        axes.set_ylim(-1, 1)

        self.data_changed = True

    traits_view_mpl = View(
        Group(
            #                       Group( Item( 'figure', style = 'custom',
            #                              editor = MPLFigureEditor(),
            #                              show_label = False )
            #                              , id = 'figure.view' ),
            Item('corr_arr',
                 show_label=False,
                 style='readonly',
                 editor=TabularEditor(adapter=ArrayAdapter()))),
        resizable=True,
    )

    traits_view = View(Item('corr_arr',
                            editor=tabular_editor,
                            show_label=False),
                       resizable=True,
                       scrollable=True,
                       buttons=['OK', 'Cancel'],
                       width=1.0,
                       height=0.5)
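
# A brief sketch (using scipy.stats.spearmanr as an assumed stand-in for the
# MatSpearman helper above) of building a rank-correlation matrix from a list
# of flattened 1D variable arrays stacked column-wise, as done in
# '_get_corr_arr'. The random test data are placeholders.
import numpy as np
from scipy.stats import spearmanr

rng = np.random.RandomState(0)
var_arr_list = [rng.rand(200, 1),             # e.g. contact fraction
                rng.rand(200, 1),             # e.g. bond free length
                rng.rand(200, 1)]             # e.g. slack
corr_data = np.hstack(var_arr_list)           # shape (200, 3), one column per variable
corr_arr, p_values = spearmanr(corr_data)     # 3 x 3 rank-correlation matrix
print(corr_arr.shape)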
Example No. 24
class Randomization( HasTraits ):
    '''Multidimensional statistical integration.
    
    Its name SPIRRID is an acronym for 
    Set of Parallel Independent Random Responses with Identical Distributions
    
    The package implements the evaluation of an integral over a set of 
    random variables affecting a response function RF and distributed 
    according to a probabilistic distribution PDistrib.
    
    The input parameters are divided into four categories in order
    to define state consistency of the evaluation. The outputs
    are defined as cached properties that are reevaluated in response
    to changes in the inputs.

    The following events accumulate changes in the input parameters of spirrid:
    rf_change - change in the response function
    rand_change - change in the randomization
    conf_change - change in the configuration of the algorithm
    eps_change - change in the studied range of the process control variable       
    '''
    #--------------------------------------------------------------------
    # Response function 
    #--------------------------------------------------------------------
    #
    rf = Instance( IRF )
    def _rf_changed( self ):
        self.on_trait_change( self._set_rf_change, 'rf.changed' )
        self.rv_dict = {}
    #--------------------------------------------------------------------
    # Specification of random parameters 
    #--------------------------------------------------------------------
    # 
    rv_dict = Dict
    def add_rv( self, variable, distribution = 'uniform', discr_type = 'T grid',
                loc = 0., scale = 1., shape = 1., n_int = 30 ):
        '''Declare a variable as random 
        '''
        if variable not in self.rf.param_keys:
            raise AssertionError, 'parameter %s not defined by the response function' \
                % variable

        params_with_distr = self.rf.traits( distr = lambda x: type( x ) == ListType
                                            and distribution in x )
        if variable not in params_with_distr:
            raise AssertionError, 'distribution type %s not allowed for parameter %s' \
                % ( distribution, variable )

        # @todo - let the RV take care of PDistrib specification.
        # isolate the dirty two-step definition of the distrib from spirrid 
        #
        pd = PDistrib( distr_choice = distribution, n_segments = n_int )
        pd.distr_type.set( scale = scale, shape = shape, loc = loc )
        self.rv_dict[variable] = RV( name = variable, discr_type = discr_type,
                                     pd = pd, n_int = n_int )

    def del_rv( self, variable ):
        '''Delete declaration of random variable
        '''
        del self.rv_dict[ variable ]

    def clear_rv( self ):
        self.rv_dict = {}

    # subsidiary methods for sorted access to the random variables.
    # (note dictionary has not defined order of its items)
    rv_keys = Property( List, depends_on = 'rv_dict' )
    @cached_property
    def _get_rv_keys( self ):
        rv_keys = sorted( self.rv_dict.keys() )
        # the random variable gets an index based on the 
        # sorted keys
        for idx, key in enumerate( rv_keys ):
            self.rv_dict[ key ].idx = idx
        return rv_keys

    rv_list = Property( List, depends_on = 'rv_dict' )
    @cached_property
    def _get_rv_list( self ):
        return map( self.rv_dict.get, self.rv_keys )

    #--------------------------------------------------------------------
    # Define which changes in the response function and in the 
    # statistical parameters are relevant for reevaluation of the response
    #--------------------------------------------------------------------
    rf_change = Event
    @on_trait_change( 'rf.changed' )
    def _set_rf_change( self ):
        self.rf_change = True

    rand_change = Event
    @on_trait_change( 'rv_dict, rv_dict.changed' )
    def _set_rand_change( self ):
        self.rand_change = True

    conf_change = Event
    @on_trait_change( '+alg_option' )
    def _set_conf_change( self ):
        self.conf_change = True

    eps_change = Event
    @on_trait_change( '+eps_range' )
    def _set_eps_change( self ):
        self.eps_change = True

    # Dictionary with key = rf parameters
    # and values = default param values for the resp func 
    #
    param_dict = Property( Dict, depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_param_dict( self ):
        '''Gather all the traits with the metadata distr specified.
        '''
        param_dict = {}
        for name, value in zip( self.rf.param_keys, self.rf.param_values ):
            rv = self.rv_dict.get( name, None )
            if rv is None:
                param_dict[ name ] = value
            else:
                param_dict[ name ] = self.theta_ogrid[ rv.idx ]
        return param_dict

    def get_rvs_theta_arr( self, n_samples ):
        rvs_theta_arr = array( [ repeat( value, n_samples ) for value in self.rf.param_values ] )
        for idx, name in enumerate( self.rf.param_keys ):
            rv = self.rv_dict.get( name, None )
            if rv:
                rvs_theta_arr[ idx, :] = rv.get_rvs_theta_arr( n_samples )
        return rvs_theta_arr

    # Constant parameters
    #
    const_param_dict = Property( Dict, depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_const_param_dict( self ):
        const_param_dict = {}
        for name, v in zip( self.rf.param_keys, self.rf.param_values ):
            if name not in self.rv_keys:
                const_param_dict[ name ] = v
        return const_param_dict

    # List of discretized statistical domains
    # 
    theta_arr_list = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_theta_arr_list( self ):
        '''Get list of arrays with discretized RVs.
        '''
        return [ rv.theta_arr for rv in self.rv_list ]

    # Discretized statistical domain
    # 
    theta_ogrid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_theta_ogrid( self ):
        '''Get orthogonal list of arrays with discretized RVs.
        '''
        return orthogonalize( self.theta_arr_list )

    #---------------------------------------------------------------------------------
    # PDF * Theta arrays oriented in enumerated dimensions - broadcasting possible
    #---------------------------------------------------------------------------------
    dG_ogrid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_dG_ogrid( self ):
        '''Get orthogonal list of arrays with the integration weights
        (PDF * dTheta) of the discretized RVs.
        '''
        dG_arr_list = [ rv.dG_arr for rv in self.rv_list ]
        return orthogonalize( dG_arr_list )

    #---------------------------------------------------------------------------------
    # PDF grid - mutually multiplied arrays of PDF
    #---------------------------------------------------------------------------------
    dG_grid = Property( depends_on = 'rf_change, rand_change' )
    @cached_property
    def _get_dG_grid( self ):
        if len( self.dG_ogrid ):
            return reduce( lambda x, y: x * y, self.dG_ogrid )
        else:
            return 1.0
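
The orthogonalize helper used above is not shown in this excerpt. The following standalone sketch (all names are illustrative only) shows the broadcasting idea it is assumed to implement for dG_ogrid and dG_grid:

# Hedged sketch, not part of the original class: reshape each 1-D weight
# array so it spans its own axis; numpy broadcasting then builds the full
# n-dimensional probability grid when the arrays are multiplied.
import numpy as np

def orthogonalize_sketch(arr_list):
    '''Reshape each 1-D array to occupy its own axis of an n-D grid.'''
    n = len(arr_list)
    ogrid = []
    for i, arr in enumerate(arr_list):
        shape = [1] * n
        shape[i] = len(arr)
        ogrid.append(arr.reshape(shape))
    return ogrid

dG_a = np.array([0.2, 0.5, 0.3])           # weights of a first RV
dG_b = np.array([0.25, 0.25, 0.25, 0.25])  # weights of a second RV
dG_ogrid_sketch = orthogonalize_sketch([dG_a, dG_b])
dG_grid_sketch = dG_ogrid_sketch[0] * dG_ogrid_sketch[1]  # shape (3, 4), sums to 1.0
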
Example #25
class CompositeCrackBridgeView(ModelView):

    model = Instance(CompositeCrackBridge)
    results = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_results(self):
        if self.model.w <= 0.0:
            self.model.w = 1e-15
        sigma_c = self.model.sigma_c
#         Kf_broken = np.sum(self.model.cont_fibers.sorted_V_f * self.model.cont_fibers.sorted_nu_r *
#                            self.model.cont_fibers.sorted_stats_weights * self.model.cont_fibers.sorted_E_f *
#                            self.model.cont_fibers.damage)
        if self.model.Ll > self.model.Lr:
            return -self.model._x_arr[::-1], self.model._epsm_arr[::-1], sigma_c, self.model._epsf_arr[::-1]
        else:
            return self.model._x_arr, self.model._epsm_arr, sigma_c, self.model._epsf_arr

    x_arr = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_x_arr(self):
        return self.results[0]
 
    epsm_arr = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_epsm_arr(self):
        return self.results[1]

    epsf_arr = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_epsf_arr(self):
        return self.results[3]

    sigma_c = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_sigma_c(self):
        return self.results[2]

    def sigma_c_arr(self, w_arr, u=False, damage=False):
        sigma_c_lst = []
        u_lst = []
        damage_lst = []
        for w in w_arr:
            self.model.w = w
            sigma_c_lst.append(self.sigma_c)
            if u:
                u_lst.append(self.u_evaluated)
            if damage:
                damage_lst.append(np.sum(self.model.cont_fibers.damage *
                                         self.model.cont_fibers.sorted_stats_weights *
                                         self.model.cont_fibers.sorted_nu_r))
        if u or damage:
            return np.array(sigma_c_lst), np.array(u_lst), np.array(damage_lst)
        else:
            return np.array(sigma_c_lst)

    def secant_K(self, w_arr):
        secant_K_lst = []
        for w_i in w_arr:
            self.model.w = w_i
            secant_K_lst.append(self.model.secant_K)
        return np.array(secant_K_lst)

    u_evaluated = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_u_evaluated(self):
        return self.model.w + np.trapz(self.epsm_arr, self.x_arr)

    sigma_c_max = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_sigma_c_max(self):
        def minfunc_sigma(w):
            self.model.w = w
            stiffness_loss = np.sum(self.model.cont_fibers.Kf * self.model.cont_fibers.damage) / np.sum(self.model.cont_fibers.Kf)
            if stiffness_loss > 0.90:
                return 1. + w
            #plt.plot(w, self.sigma_c, 'ro')
            return -self.sigma_c
        def residuum_stiffness(w):
            self.model.w = w
            stiffness_loss = np.sum(self.model.Kf * self.model.damage) / np.sum(self.model.Kf)
            if stiffness_loss > 0.90:
                return 1. + w
            if stiffness_loss < 0.65 and stiffness_loss > 0.45:
                residuum = 0.0
            else:
                residuum = stiffness_loss - 0.5
            return residuum

        if len(self.model.sorted_reinf_lst[0]) == 0:
            # there are only short fibers
            def minfunc_short_fibers(w):
                self.model.w = w
                return -self.sigma_c
            w_max = fminbound(minfunc_short_fibers, 0.0, 3.0, maxfun=10, disp=0)
            return self.sigma_c, w_max
        else:
            # continuous or mixed fibers
            try:
                w_max = brentq(residuum_stiffness, 0.0, min(0.1 * (self.model.Ll + self.model.Lr), 20.))
            except:
                w_max = 0.03 * (self.model.Ll + self.model.Lr)
            w_points = np.linspace(0, w_max, len(self.model.reinforcement_lst) + 1)
            w_maxima = []
            sigma_maxima = []
            for i, w in enumerate(w_points[1:]):
                w_maxima.append(fminbound(minfunc_sigma, w_points[i], w_points[i + 1], maxfun=10, disp=0))
                sigma_maxima.append(self.sigma_c)
            max_idx = np.argmax(np.array(sigma_maxima))
            return sigma_maxima[max_idx], w_maxima[max_idx]

    def apply_load(self, sigma):
        if sigma > self.sigma_c_max[0]:
            raise ValueError('applied load %s MPa is larger than the composite strength %s MPa'
                             % (sigma, self.sigma_c_max[0]))
        else:
            def residuum(w):
                self.model.w = float(w)
                return sigma - self.sigma_c
            brentq(residuum, 0.0, min(self.sigma_c_max[1], 20.))

    def sigma_f_lst(self, w_arr):
        sigma_f_arr = np.zeros((len(w_arr), len(self.model.reinforcement_lst)))
        masks = [(self.model.sorted_xi == reinf.xi) *
                 (self.model.sorted_E_f == reinf.E_f) *
                 (self.model.sorted_V_f == reinf.V_f)
                 for reinf in self.model.reinforcement_lst]
        for i, w in enumerate(w_arr):
            if w == 0.0:
                self.model.w = 1e-15
            else:
                self.model.w = w
            self.model.damage  # trigger evaluation of the damage state for the current w
            for j, reinf in enumerate(self.model.reinforcement_lst):
                sigma_fi = np.sum(self.model._epsf0_arr * self.model.sorted_stats_weights * self.model.sorted_nu_r * 
                              self.model.sorted_E_f * (1. - self.model.damage) * masks[j])
                sigma_f_arr[i, j] = sigma_fi
        return sigma_f_arr

    Welm = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_Welm(self):
        Km = self.results[4]
        bonded_l = self.epsm_arr[0] ** 2 * Km * (self.model.Ll - np.abs(self.x_arr[0]))
        bonded_r = self.epsm_arr[-1] ** 2 * Km * (self.model.Lr - np.abs(self.x_arr[-1]))
        return 0.5 * (np.trapz(self.epsm_arr ** 2 * Km, self.x_arr) + bonded_l + bonded_r) 

    Welf = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_Welf(self):
        Kf = self.model.E_c - self.results[4]
        bonded_l = self.mu_epsf_arr[0] ** 2 * Kf * (self.model.Ll - np.abs(self.x_arr[0]))
        bonded_r = self.mu_epsf_arr[-1] ** 2 * Kf * (self.model.Lr - np.abs(self.x_arr[-1]))
        return 0.5 * (np.trapz(self.mu_epsf_arr ** 2 * Kf, self.x_arr) + bonded_l + bonded_r)

    W_el_tot = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_W_el_tot(self):
        '''total elastic energy stored in the specimen'''
        return self.Welf + self.Welm

    W_inel_tot = Property(depends_on='model.E_m, model.w, model.Ll, model.Lr, model.reinforcement_lst+')
    @cached_property
    def _get_W_inel_tot(self):
        '''total inelastic energy dissipated during loading up to w'''
        return self.U - self.W_el_tot

    U_line = Property(depends_on='model.E_m, model.Ll, model.Lr, model.reinforcement_lst+, w_arr_energy')
    @cached_property
    def _get_U_line(self):
        '''work done by external force - mfn_line'''
        w_arr = self.w_arr_energy
        u_lst = []
        F_lst = []
        for w in w_arr:
            self.model.w = w
            u_lst.append(self.u_evaluated)
            F_lst.append(self.sigma_c)
        u_arr = np.array(u_lst)
        F_arr = np.array(F_lst)
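        # cumtrapz returns one value fewer than its inputs; prepend a zero so
        # that the work integral U(w) = integral of F over u has one value
        # per entry of w_arr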
        U_line = MFnLineArray(xdata=w_arr, ydata=np.hstack((0, cumtrapz(F_arr, u_arr))))
        return U_line

    U = Property(depends_on='model.E_m, model.Ll, model.Lr, model.reinforcement_lst+, model.w')
    @cached_property
    def _get_U(self):
        '''work done by external force U(w)'''
        return self.U_line.get_values(self.model.w)

    w_arr_energy = Array

    def get_sigma_m_x_input(self, sigma):
        self.apply_load(sigma)
        line = MFnLineArray(xdata=self.x_arr,
                            ydata=self.epsm_arr)
        return line.get_values(self.x_input)
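
Before the next example, a hedged, self-contained sketch of the two scipy.optimize patterns used above in _get_sigma_c_max and apply_load: bounded maximization by minimizing the negated objective, and root finding for the crack opening that corresponds to a prescribed load. The toy sigma_c law below is illustrative only and has nothing to do with the crack-bridge mechanics.

import numpy as np
from scipy.optimize import brentq, fminbound

def sigma_c_toy(w):
    # toy crack-bridge law: rises to a peak, then softens
    return 100.0 * w * np.exp(-2.0 * w)

# bounded maximization: fminbound minimizes, so negate the objective
w_peak = fminbound(lambda w: -sigma_c_toy(w), 0.0, 5.0, disp=0)
sigma_peak = sigma_c_toy(w_peak)

# root finding: the crack opening at which a prescribed load is reached;
# brentq needs a bracket with a sign change, here [0, w_peak]
sigma_applied = 0.5 * sigma_peak
w_at_load = brentq(lambda w: sigma_applied - sigma_c_toy(w), 0.0, w_peak)
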
Example #26
class RV( HasTraits ):
    '''Class representing the definition and discretization of a random variable.
    '''
    name = Str

    pd = Instance( IPDistrib )
    def _pd_changed( self ):
        self.pd.n_segments = self._n_int

    changed = Event
    @on_trait_change( 'pd.changed,+changed' )
    def _set_changed( self ):
        self.changed = True

    _n_int = Int
    n_int = Property
    def _set_n_int( self, value ):
        if self.pd:
            self.pd.n_segments = value
        self._n_int = value
    def _get_n_int( self ):
        return self.pd.n_segments

    # index within the randomization
    idx = Int( 0 )

    # type of the RV discretization 
    discr_type = Enum( 'T grid', 'P grid', 'MC',
                        changed = True )
    def _discr_type_default( self ):
        return 'T grid'

    theta_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_theta_arr( self ):
        '''Get the discretized values of the random variable
        according to the chosen discr_type.
        '''
        if self.discr_type == 'T grid':

            # Get the discr_type from pdistrib and shift it
            # such that the midpoints of the segments are used for the
            # integration.

            x_array = self.pd.x_array
            # Note assumption of T grid discr_type
            theta_array = x_array[:-1] + self.pd.dx / 2.0

        elif self.discr_type == 'P grid':

            # P grid discretization generated from the inverse cumulative
            # probability
            #
            distr = self.pd.distr_type.distr
            # Grid of constant probabilities
            pi_arr = linspace( 0.5 / self.n_int, 1. - 0.5 / self.n_int, self.n_int )
            theta_array = distr.ppf( pi_arr )

        return theta_array

    dG_arr = Property( Array( 'float_' ), depends_on = 'changed' )
    @cached_property
    def _get_dG_arr( self ):

        if self.discr_type == 'T grid':

            d_theta = self.theta_arr[1] - self.theta_arr[0]
            return self.pd.get_pdf_array( self.theta_arr ) * d_theta

        elif self.discr_type == 'P grid':

            # P grid discretization generated from the inverse cumulative
            # probability
            #
            return array( [ 1.0 / float( self.n_int ) ], dtype = 'float_' )

    def get_rvs_theta_arr( self, n_samples ):
        return self.pd.get_rvs_array( n_samples )
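
A hedged sketch of the two discretization schemes implemented by theta_arr and dG_arr above, written directly against scipy.stats instead of the PDistrib machinery; the normal distribution, its parameters and n_int are illustrative only.

import numpy as np
from scipy.stats import norm

distr = norm(loc=10.0, scale=2.0)
n_int = 20

# 'T grid': equidistant theta values, weights = pdf * d_theta
theta_T = np.linspace(distr.ppf(0.001), distr.ppf(0.999), n_int)
d_theta = theta_T[1] - theta_T[0]
dG_T = distr.pdf(theta_T) * d_theta          # approximately sums to 1

# 'P grid': equal-probability values from the inverse CDF, weights = 1/n
pi_arr = np.linspace(0.5 / n_int, 1.0 - 0.5 / n_int, n_int)
theta_P = distr.ppf(pi_arr)
dG_P = np.ones(n_int) / n_int                # sums exactly to 1
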
Example #27
class YMBFieldVar(HasTraits):
    data = Instance(IYMBData)

    n_cols = Property()

    def _get_n_cols(self):
        return self.data.n_cuts

    var_enum = Trait('radius', var_dict, modified=True)
    scalar_arr = Property(depends_on='var_enum')

    def _get_scalar_arr(self):
        return getattr(self.data, self.var_enum_)

    sorted_on = Bool(False, modified=True)

    scalar_arr_sorted = Property(depends_on='var_enum')

    def _get_scalar_arr_sorted(self):
        ''' Return scalar array sorted by the shortest distance from the edge
        '''
        scalar_arr = zeros_like(getattr(self.data, self.var_enum_))
        scalar_mask_arr = zeros_like(getattr(self.data, self.var_enum_))
        distance_arr = self.data.edge_distance.filled()
        for i in range(0, self.n_cols):
            scalar_mask_arr[:, i] = zip(
                *sorted(zip(distance_arr[:, i],
                            getattr(self.data, self.var_enum_).mask[:, i]),
                        reverse=True))[1]
            scalar_arr[:, i] = zip(
                *sorted(zip(distance_arr[:, i],
                            getattr(self.data, self.var_enum_).filled()[:, i]),
                        reverse=True))[1]
        return ma.array(scalar_arr, mask=array(scalar_mask_arr, dtype=bool))

    figure = Instance(Figure, ())

    def _figure_default(self):
        figure = Figure()
        figure.add_axes([0.1, 0.1, 0.8, 0.8])
        return figure

    data_changed = Event(True)

    @on_trait_change('+modified, data')
    def _redraw(self):
        self.figure.clear()
        self.figure.add_axes([0.1, 0.1, 0.8, 0.8])
        figure = self.figure
        axes = figure.axes[0]
        axes.clear()

        if self.sorted_on:
            scalar_arr = self.scalar_arr_sorted
        else:
            scalar_arr = self.scalar_arr

        xi = linspace(min(self.data.cut_x), max(self.data.cut_x), 100)

        x = (ones_like(scalar_arr) * self.data.cut_x).flatten()
        ny_row = scalar_arr.shape[0]
        dy = max(diff(self.data.cut_x))
        yi = linspace(0, ny_row * dy, ny_row)
        y = (ones_like(scalar_arr).T *
             linspace(0, ny_row * dy, ny_row)).T.flatten()
        z = scalar_arr.flatten()
        zi = griddata(x, y, z, xi, yi, interp='nn')

        # contour the gridded data, plotting dots at the nonuniform data points
        # axes.contour( xi, yi, zi, 20, linewidths = .5, colors = 'k' )
        # plotting filled contour
        axes.contourf(xi, yi, zi, 200, cmap=my_cmap_lin)  # my_cmap_lin
        scat = axes.scatter(x,
                            y,
                            marker='o',
                            c=z,
                            s=20,
                            linewidths=0,
                            cmap=my_cmap_lin)
        figure.colorbar(scat)

        self.data_changed = True

    view = View('var_enum',
                'sorted_on',
                Item('figure',
                     style='custom',
                     editor=MPLFigureEditor(),
                     show_label=False),
                id='yarn_structure_view',
                resizable=True,
                scrollable=True,
                dock='tab',
                width=0.8,
                height=0.4)
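
The _redraw method above uses the natural-neighbor griddata from matplotlib.mlab, which newer matplotlib versions no longer provide. The following is a hedged sketch of the same scatter-to-grid step using scipy.interpolate.griddata; the random test data is illustrative only.

import numpy as np
from scipy.interpolate import griddata

# scattered measurements (stand-ins for cut position x, fiber index y, value z)
x = np.random.uniform(0.0, 1.0, 200)
y = np.random.uniform(0.0, 1.0, 200)
z = np.sin(3.0 * x) * np.cos(3.0 * y)

# regular grid suitable for contourf
xi = np.linspace(0.0, 1.0, 100)
yi = np.linspace(0.0, 1.0, 100)
XI, YI = np.meshgrid(xi, yi)
ZI = griddata((x, y), z, (XI, YI), method='linear')
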
class _MFnMatplotlibEditor(Editor):

    # @todo This is not used here, either activate or delete
    scrollable = True

    # adjustable parameters
    adapter = Instance(MFnMultiPlotAdapter)

    # the plot representation in matplotlib
    figure = Instance(Figure(facecolor=MFnMultiPlotAdapter().bgcolor), ())

    def init(self, parent):

        factory = self.factory
        self.adapter = factory.adapter

        self.control = self._create_canvas(parent)
        self.value.on_trait_change(self.update_editor, 'data_changed')

    def update_editor(self):
        figure = self.figure
        axes = figure.add_subplot(111)
        if figure.canvas is not None:
            figure.delaxes(axes)
            self._refresh_plot()
            figure.canvas.draw()

    def _create_canvas(self, parent):
        """ Create the MPL canvas. """
        # The panel lets us add additional controls.
        fig = self.figure

        panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)
        sizer = wx.BoxSizer(wx.VERTICAL)
        panel.SetSizer(sizer)
        #
        # matplotlib commands to create a canvas
        mpl_control = FigureCanvas(panel, -1, fig)
        toolbar = NavigationToolbar2Wx(mpl_control)
        sizer.Add(toolbar, 0, wx.EXPAND)
        sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW)
        fig.canvas.SetMinSize((100, 100))
        return panel

    def _refresh_plot(self):

        a = self.adapter
        figure = self.figure
        mfn_multiline = self.value
        plot_set = []
        for data in mfn_multiline.xdata:
            plot_set.append(any(array(data != [0., 1.])))
        idx = where(array(plot_set))[0]
        xdata = []
        ydata = []
        for index in idx:
            xdata.append(mfn_multiline.xdata[index])
            ydata.append(mfn_multiline.ydata[index])
        legend = array(a.legend_labels)[idx]
        color = array(a.mline_color)[idx]
        style = array(a.mline_style)[idx]
        width = array(a.mline_width)[idx]

        if a.var_x != '':
            label_x = getattr(self.object, a.var_x)
        else:
            label_x = a.label_x

        if a.var_y != '':
            label_y = getattr(self.object, a.var_y)
        else:
            label_y = a.label_y

        axes = figure.add_subplot(111)

        for x, y, c, s, w in zip(xdata, ydata, color, style, width):
            axes.plot(x, y, color=c, linestyle=s, linewidth=w)
        axes.set_xlabel(label_x, weight='semibold')
        axes.set_ylabel(label_y, weight='semibold')
        axes.set_title(a.title, size='large', color='black',
                       weight='bold', position=(.5, 1.03))
        axes.set_axis_bgcolor(color='white')
        axes.ticklabel_format(scilimits=a.scilimits)
        axes.grid(color='gray', linestyle='--', linewidth=0.1, alpha=0.4)
        axes.legend((legend), loc='best')
        axes.set_xscale(a.xscale)
        axes.set_yscale(a.yscale)
        if a.xticks != 0:
            axes.xaxis.set_major_locator(MaxNLocator(a.xticks))
        if a.yticks != 0:
            axes.yaxis.set_major_locator(MaxNLocator(a.yticks))
Example #29
class ExpEM(ExType):
    '''Experiment: Elastic Modulus Test
    '''
    #    label = Str('three point bending test')

    implements(IExType)

    file_ext = 'TRA'

    #--------------------------------------------------------------------
    # register a change of the traits with metadata 'input'
    #--------------------------------------------------------------------

    input_change = Event

    @on_trait_change('+input, ccs.input_change, +ironing_param')
    def _set_input_change(self):
        self.input_change = True

    #--------------------------------------------------------------------------------
    # specify inputs:
    #--------------------------------------------------------------------------------

    edge_length = Float(0.06,
                        unit='m',
                        input=True,
                        table_field=True,
                        auto_set=False,
                        enter_set=True)
    height = Float(0.12,
                   unit='m',
                   input=True,
                   table_field=True,
                   auto_set=False,
                   enter_set=True)
    gauge_length = Float(0.10,
                         unit='m',
                         input=True,
                         table_field=True,
                         auto_set=False,
                         enter_set=True)

    # age of the concrete at the time of testing
    age = Int(39,
              unit='d',
              input=True,
              table_field=True,
              auto_set=False,
              enter_set=True)
    loading_rate = Float(0.6,
                         unit='MPa/s',
                         input=True,
                         table_field=True,
                         auto_set=False,
                         enter_set=True)

    #--------------------------------------------------------------------------
    # composite cross section
    #--------------------------------------------------------------------------

    ccs = Instance(CompositeCrossSection)

    def _ccs_default(self):
        '''default settings correspond to 
        setup '7u_MAG-07-03_PZ-0708-1'
        '''
        #        fabric_layout_key = 'MAG-07-03'
        #        fabric_layout_key = '2D-02-06a'
        fabric_layout_key = '2D-05-11'
        #        concrete_mixture_key = 'PZ-0708-1'
        concrete_mixture_key = 'FIL-10-09'
        orientation_fn_key = 'all0'
        #        orientation_fn_key = 'all90'
        #        orientation_fn_key = '90_0'
        n_layers = 12
        s_tex_z = 0.060 / (n_layers + 1)
        ccs = CompositeCrossSection(fabric_layup_list=[
            plain_concrete(s_tex_z * 0.5),
            FabricLayUp(n_layers=n_layers,
                        orientation_fn_key=orientation_fn_key,
                        s_tex_z=s_tex_z,
                        fabric_layout_key=fabric_layout_key),
            plain_concrete(s_tex_z * 0.5)
        ],
                                    concrete_mixture_key=concrete_mixture_key)
        return ccs

    #--------------------------------------------------------------------------
    # Get properties of the composite
    #--------------------------------------------------------------------------

    # E-modulus of the composite at the time of testing
    E_c = Property(Float,
                   unit='MPa',
                   depends_on='input_change',
                   table_field=True)

    def _get_E_c(self):
        return self.ccs.get_E_c_time(self.age)

    # E-modulus of the composite after 28 days
    E_c28 = DelegatesTo('ccs', listenable=False)

    # reinforcement ratio of the composite
    rho_c = DelegatesTo('ccs', listenable=False)

    #--------------------------------------------------------------------------------
    # define processing
    #--------------------------------------------------------------------------------

    def _read_data_array(self):
        ''' Read the experiment data. 
        '''
        print('READ FILE')
        _data_array = np.loadtxt(self.data_file,
                                 delimiter=';',
                                 skiprows=2,
                                 usecols=(4, 5, 10, 17, 20))
        self.data_array = _data_array

    names_and_units = Property

    @cached_property
    def _get_names_and_units(self):
        ''' Set the names and units of the measured data.
        '''
        names = ['delta_u1', 'delta_u2', 'w', 'F', 'time']
        units = ['mm', 'mm', 'mm', 'kN', 's']
        return names, units


#0#"Arbeit";
#1#"Dehn. abs";
#2#"Dehn. abs (2. Kanal)";
#3#"Dehnung";

#4#"Dehnung (1. Kanal)";
#5#"Dehnung (2. Kanal)";

#6#"Dehnung nominell";
#7#"DeltaE";
#8#"E1 korr";
#9#"E2 korr";

#10#"Kolbenweg";

#11#"Kolbenweg abs.";
#12#"Lastrahmen";
#13#"LE-Kanal";
#14#"PrXXXfzeit";
#15#"Querdehnung";
#16#"S korr";
#17#"Standardkraft";
#18#"Vorlaufzeit";
#19#"Weg";
#20#"Zeit";
#21#"Zyklus"

#
#"Nm";
#"mm";
#"mm";
#"mm";
#"mm";
#"mm";
#"mm";
#"%";
#"mm";
#" ";
#"mm";
#"mm";
#" ";
#"mm";
#"s";
#"mm";
#" ";
#"N";
#"s";
#"mm";
#"s";
#" "

#--------------------------------------------------------------------------------
# plot templates
#--------------------------------------------------------------------------------

    plot_templates = {
        'force / displacement': '_plot_force_displacement',
        'stress / strain': '_plot_stress_strain',
        'stress / time': '_plot_stress_time',
    }

    default_plot_template = 'force / displacement'

    def _plot_force_displacement(self, axes):
        xkey = 'deflection [mm]'
        ykey = 'force [kN]'
        xdata = self.w
        ydata = self.F / 1000.  # convert from [N] to [kN]
        axes.set_xlabel('%s' % (xkey, ))
        axes.set_ylabel('%s' % (ykey, ))
        axes.plot(xdata, ydata
                  # color = c, linewidth = w, linestyle = s
                  )

    def _plot_stress_strain(self, axes):
        # force [N] -> [MN], divided by the cross-section area [m^2] -> stress [MPa]
        sig = (self.F / 1000000.) / self.edge_length ** 2
        eps1 = (self.delta_u1 / 1000.) / self.gauge_length
        eps2 = (self.delta_u2 / 1000.) / self.gauge_length
        eps_m = (eps1 + eps2) / 2.
        axes.plot(eps_m, sig, color='blue', linewidth=2)

    def _plot_stress_time(self, axes):
        # force [N] -> [MN], divided by the cross-section area [m^2] -> stress [MPa]
        sig = (self.F / 1000000.) / self.edge_length ** 2
        axes.plot(self.time, sig, color='blue', linewidth=2)

    def _plot_displ_time(self, axes):
        axes.plot(self.time, self.displ, color='blue', linewidth=2)

    #--------------------------------------------------------------------------------
    # view
    #--------------------------------------------------------------------------------

    traits_view = View(VGroup(
        Group(Item('edge_length', format_str="%.3f"),
              Item('height', format_str="%.3f"),
              Item('gauge_length', format_str="%.3f"),
              label='geometry'),
        Group(Item('loading_rate'), Item('age'), label='loading rate and age'),
        Group(Item('E_c', show_label=True, style='readonly',
                   format_str="%.0f"),
              Item('ccs@', show_label=False),
              label='composite cross section')),
                       scrollable=True,
                       resizable=True,
                       height=0.8,
                       width=0.6)
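
Before the next example, a hedged standalone sketch of the processing done by _read_data_array and _plot_stress_strain above: reading the selected columns of a ';'-separated TRA export and converting force and gauge displacements to stress and strain. The file name and the specimen dimensions below are placeholders.

import numpy as np

data = np.loadtxt('specimen.TRA', delimiter=';', skiprows=2,
                  usecols=(4, 5, 10, 17, 20))
delta_u1, delta_u2, w, F, time = data.T

edge_length = 0.06    # [m], placeholder
gauge_length = 0.10   # [m], placeholder

sig = (F / 1e6) / edge_length ** 2                           # [N] -> [MN] / [m^2] = [MPa]
eps_m = ((delta_u1 + delta_u2) / 2. / 1000.) / gauge_length  # [mm] -> [m], strain [-]
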
Example #30
class SCM(HasTraits):
    '''Stochastic Cracking Model - compares matrix strength and stress,
    inserts new CB instances at positions where the matrix strength
    is lower than the stress; evaluates the stress-strain diagram
    by integrating the strain profile along the composite'''
    
    length = Float(desc='composite specimen length')
    nx = Int(desc='number of discretization points')
    E_m = Float
    reinforcement = Instance(Reinforcement)
    
    interpolator = Property(Instance(Interpolator), depends_on = 'E_m, reinforcement, +load')
    @cached_property
    def _get_interpolator(self):
        single_CB = CompositeCrackBridge(E_m=self.E_m,
                                         reinforcement_lst=[self.reinforcement])
        CB_model = CompositeCrackBridgePostprocessor(model=single_CB)
        return Interpolator(CB_model = CB_model,
                             load_sigma_c_max = self.load_sigma_c_max,
                             load_n_sigma_c = self.load_n_sigma_c,
                             n_w = 80, n_x = 61, n_BC = 6
                             )
    
    sigma_c_crack = List
    cracks_list = List

    load_sigma_c = Property(depends_on='+load')
    @cached_property
    def _get_load_sigma_c(self):
        # applied external load in terms of composite stress
        return np.linspace(self.load_sigma_c_min,
                           self.load_sigma_c_max, self.load_n_sigma_c)

    load_sigma_c_min = Float(load=True)
    load_sigma_c_max = Float(load=True)
    load_n_sigma_c = Int(load=True)

    x_arr = Property(Array, depends_on='length, nx')

    @cached_property
    def _get_x_arr(self):
        # discretizes the specimen length
        return np.linspace(0., self.length, self.nx)

    random_field = Instance(RandomField)
    
    matrix_strength = Property(depends_on='random_field.+modified')
    @cached_property
    def _get_matrix_strength(self):
        # evaluates a random field realization
        # and creates a spline representation
        rf = self.random_field.random_field
        rf_spline = interp1d(self.random_field.xgrid, rf)
        return rf_spline(self.x_arr)

    def sort_cbs(self):
        # sorts the CBs by position and adjusts the boundary conditions
        # sort the CBs
        cb_list = self.cracks_list[-1]
        crack_position = cb_list[-1].position
        cb_list = sorted(cb_list, key=attrgetter('position'))
        # find idx of the new crack
        for i, crack in enumerate(cb_list):
            if crack.position == crack_position:
                idx = i
        # specify the boundaries
        if idx != 0:
            # there is a crack at the left hand side
            cbl = cb_list[idx - 1]
            cb = cb_list[idx]
            cbl.Lr = (cb.position - cbl.position) / 2.
            cb.Ll = cbl.Lr
        else:
            # the new crack is the first from the left hand side
            cb_list[idx].Ll = cb_list[idx].position

        if idx != len(cb_list) - 1:
            # there is a crack at the right hand side
            cb, cbr = cb_list[idx], cb_list[idx + 1]
            cbr.Ll = (cbr.position - cb.position) / 2.
            cb.Lr = cbr.Ll
        else:
            # the new crack is the first from the right hand side
            cb_list[idx].Lr = self.length - cb_list[idx].position

        # specify the x range and stress profile for
        # the new crack and its neighbors
        idxs = [idx - 1, idx, idx + 1]
        if idx == 0:
            idxs.remove(-1)
        if idx == len(cb_list) - 1:
            idxs.remove(len(cb_list))
        for idx in idxs:
            mask1 = self.x_arr >= (cb_list[idx].position - cb_list[idx].Ll)
            if idx == 0:
                mask1[0] = True
            mask2 = self.x_arr <= (cb_list[idx].position + cb_list[idx].Lr)
            cb_list[idx].x = self.x_arr[mask1 * mask2] - cb_list[idx].position
        self.cracks_list[-1] = cb_list

    def cb_list(self, load):
        if len(self.cracks_list) != 0:
            idx = np.sum(np.array(self.sigma_c_crack) < load) - 1
            if idx == -1:
                return [None]
            else:
                return self.cracks_list[idx]
        else:
            return [None]

    def sigma_m(self, load):
        Em = self.E_m
        Ef = self.reinforcement.E_f
        Vf = self.reinforcement.V_f
        Ec = Ef * Vf + Em * (1. - Vf)
        sigma_m = load * Em / Ec * np.ones(len(self.x_arr))
        cb_load = self.cb_list(load)
        if cb_load[0] is not None:
            for cb in cb_load:
                crack_position_idx = np.argwhere(self.x_arr == cb.position)
                left = crack_position_idx - len(np.nonzero(cb.x < 0.)[0])
                right = crack_position_idx + len(np.nonzero(cb.x > 0.)[0]) + 1
                sigma_m[left:right] = cb.get_epsm_x_w(load).T * self.E_m
        return sigma_m

    def epsf_x(self, load):
        Em = self.E_m
        Ef = self.reinforcement.E_f
        Vf = self.reinforcement.V_f
        Ec = Ef * Vf + Em * (1. - Vf)
        epsf_x = load / Ec * np.ones(len(self.x_arr))
        cb_load = self.cb_list(load)
        if cb_load[0] is not None:
            for cb in cb_load:
                crack_position_idx = np.argwhere(self.x_arr == cb.position)
                left = crack_position_idx - len(np.nonzero(cb.x < 0.)[0])
                right = crack_position_idx + len(np.nonzero(cb.x > 0.)[0]) + 1
                epsf_x[left:right] = cb.get_epsf_x_w(load).T
        return epsf_x

    def residuum(self, q):
        return np.min(self.matrix_strength - self.sigma_m(q))
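
    # Crack-initiation criterion used in evaluate() below: a new crack forms
    # at the load level where the weakest point of the matrix is just
    # reached, i.e. where min(matrix_strength - sigma_m(load)) becomes zero.
    # brentq on residuum() locates that load; argmin of the same difference
    # gives the position of the new crack.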

    def evaluate(self):
        # seek the minimum strength redundancy to find the position
        # of the next crack
        last_pos = pi  # sentinel value: no crack position recorded yet
        sigc_min = 0.0
        sigc_max = self.load_sigma_c_max
        while np.any(self.sigma_m(sigc_max) > self.matrix_strength):
            sigc_min = brentq(self.residuum, sigc_min, sigc_max)
            crack_position = self.x_arr[np.argmin(self.matrix_strength -
                                                  self.sigma_m(sigc_min))]
            new_cb = CB(position=float(crack_position),
                        crack_load_sigma_c=sigc_min - self.load_sigma_c_max / 1000.,
                        interpolator=self.interpolator)
            self.sigma_c_crack.append(sigc_min - self.load_sigma_c_max / 1000.)
            if len(self.cracks_list) != 0:
                self.cracks_list.append(copy.copy(self.cracks_list[-1])
                                        + [new_cb])
            else:
                self.cracks_list.append([new_cb])
            self.sort_cbs()
            cb_list = self.cracks_list[-1]
            cb = [cbi for cbi in cb_list if
                  cbi.position == float(crack_position)][0]
            sigc = cb.get_sigma_c_x(self.load_sigma_c).flatten()
            new_sigc_max = np.max(sigc[~np.isnan(sigc)])
#             plt.plot(self.x_arr, self.epsf_x(sigc_min))
#             plt.plot(self.x_arr, self.sigma_m(sigc_min)/self.E_m)
#             plt.plot(self.x_arr, self.matrix_strength / self.E_m)
#             plt.show()
            if new_sigc_max < sigc_max:
                sigc_max = new_sigc_max
            if float(crack_position) == last_pos:
                print(last_pos)
                raise ValueError('got stuck in a loop, '
                                 'try to adapt the x, w, BC ranges')
            last_pos = float(crack_position)