Пример #1
0
 def test_convert_normal(self):
     """Converted normal attribute exposes mean == mx and sd == mx * vx."""
     def converted(mean, cov):
         # Build the attribute, convert it, and wrap the converted args
         # back into a random-variable object.
         attr = props.attr(name="test", rtype="n", mx=mean, vx=cov)
         return props.rv(rtype="n", args=attr.get_convert().args)

     self.assertAlmostEqual(converted(1e-10, 1e10).mean, 1e-10)
     self.assertAlmostEqual(converted(1e-10, 1e10).sd, 1e-10 * 1e10)
     self.assertAlmostEqual(converted(-5, 4.3).mean, -5)
Пример #2
0
 def test_convert_exponential(self):
     """Converted exponential attribute exposes mean == mx and sd == mx * vx."""
     def converted(mean, cov):
         # Build the attribute, convert it, and wrap the converted args
         # back into a random-variable object.
         attr = props.attr(name="test", rtype="e", mx=mean, vx=cov)
         return props.rv(rtype="e", args=attr.get_convert().args)

     self.assertAlmostEqual(converted(4.5, 1.2).mean, 4.5)
     self.assertAlmostEqual(converted(4.5, 1.2).sd, 4.5 * 1.2)
     self.assertAlmostEqual(converted(0.05, 0.1).mean, 0.05)
     self.assertAlmostEqual(converted(0.05, 0.1).sd, 0.05 * 0.1)
Пример #3
0
 def test_convert_uniform(self):
     """Converted uniform attribute: mean is the interval midpoint,
     sd is width / sqrt(12) (mx appears to be the lower bound and
     vx the width -- consistent with the expected values below)."""
     def converted(lower, width):
         # Build the attribute, convert it, and wrap the converted args
         # back into a random-variable object.
         attr = props.attr(name="test", rtype="u", mx=lower, vx=width)
         return props.rv(rtype="u", args=attr.get_convert().args)

     self.assertAlmostEqual(converted(0, 5).mean, 2.5)
     self.assertAlmostEqual(converted(0, 5).sd, math.sqrt((5**2) / 12))
     self.assertAlmostEqual(converted(6, 4).mean, 8)
     self.assertAlmostEqual(converted(6, 4).sd, math.sqrt((4**2) / 12))
Пример #4
0
    def susipf(self,
               mx,
               vx,
               rtype,
               ftype="interp",
               domain=None,
               mono="i",
               stairb="n",
               selec=-1):
        '''
        ###################################################################
        Description:

        After having calculated a result object by get_result, we can now
        base on this result to derive a failure probability by interpolation.
        This function computes the failure probability for a specific
        interpolation method and a stochastic distribution of the dynamic
        variable xk.
        We can also choose on which of the calculated results the
        interpolation shall be based by adjusting parameter "selec".

        ###################################################################
        Parameters:

        mx: float

            mean value of the dynamic variable

        vx: positive float

            coefficient of variation of the dynamic variable

        rtype: str in {"n","ln","e","u","g"}

            distribution type of the dynamic variable
            {"n": normal, "ln": lognormal, "e": exponential,
            "u": uniform, "g": Gumbel}

        ftype: str in {"stair","interp"}, default="interp"

            selects the interpolation method,
            {"stair": staircase approach according to monotonicity "mono"
            parameter,
            "interp": PCHIP interpolation with monotonicity in "mono"
            parameter}

        domain: [float,float], default=[-1e10,1e10]

            domain boundaries for interpolation, values of xk exceeding the
            domain are ignored! Change the domain for e.g. lognormals (> 0).

        mono: str in {"i","d"}

            monotonicity of the conditional failure function
            if "i", then we assume the failure probability is
            increasing if xk is increased
            if "d", then we assume the failure probability is
            decreasing if xk is increased

        stairb: str in {"y","n"}, default="n"

            if "n", we interpolate normally
            if "y", we can derive boundaries for the result
            for interpolation method "stair", indeed "y" allows
            to compute the result as if the underlying conditional
            failure probability function was e.g. "i" instead of "d"

        selec: integer, default=-1

            index of the result element from self.results that is chosen;
            -1 refers to the last computed result in the list of all
            computed results

        ###################################################################
        Returns:

        [result, outxspan_lower, outxspan_upper, x1]

        result: the result by interpolation

        other variables: additional information


        ###################################################################
        Raises:

        ValueError if ftype is neither "interp" nor "stair"
        '''

        #added hold for staircase boundaries (no reversing here)

        # Avoid a mutable default argument (shared across calls); build the
        # default domain freshly on every invocation instead.
        if domain is None:
            domain = [-1e10, 1e10]

        #remove doubles sus, first order by yvals to know where mc and sus estimates are
        sortedxy = sorted(zip(self.results[selec].xk, self.results[selec].pfi),
                          key=lambda x: x[1])
        [xvals, yvals] = [np.array(sortedxy)[:, 0], np.array(sortedxy)[:, 1]]
        if "sus" in self.results[
                selec].itype:  #this decision is needed otherwise neglecting first xk
            # Index of the first "susi" entry. Computed only in this branch,
            # where it is actually used, so results without any subset
            # simulation entries no longer raise ValueError here.
            # (Assumes "susi" is present whenever "sus" is -- TODO confirm.)
            first_susi = self.results[selec].itype.index("susi")
            [xvals, yvals] = [
                xvals[0:min(-first_susi + 1, -1)],
                yvals[0:min(-first_susi + 1, -1)]
            ]  #min with -1 if =1 then 0:0
        else:
            pass  #all xk used

        #increasing order in xvals
        if mono == "d":  #reverse order
            [xvals, yvals] = [xvals[::-1], yvals[::-1]]

        # Random variable of the dynamic variable in the internal converted
        # representation (provides pdf / cdf_b helpers).
        condv_rv = props.attr(name="check_xk", rtype=rtype, mx=mx,
                              vx=vx).get_convert()
        # Probability mass of xk below the first / above the last grid point.
        f_lower = condv_rv.cdf_b(b1=domain[0], b2=xvals[0])
        f_upper = condv_rv.cdf_b(b1=xvals[-1], b2=domain[1])

        if stairb == "n":  #normal stair approximation, not for the boundary
            if mono == "i":
                outxspan_lower = f_lower * yvals[0]
                outxspan_upper = f_upper * 1.0
            else:
                outxspan_lower = f_lower * 1.0
                outxspan_upper = f_upper * yvals[-1]
        else:  #act as if mono but is boundary
            if mono == "i":
                outxspan_lower = f_lower * yvals[0]
                outxspan_upper = f_upper * 0.0
            else:
                outxspan_lower = f_lower * 0.0
                outxspan_upper = f_upper * yvals[-1]

        #original order
        if mono == "d":  #reverse order
            [xvals, yvals] = [xvals[::-1], yvals[::-1]]

        if ftype == "interp":
            # Integrate interpolated conditional failure probability (clipped
            # at zero) times the pdf of xk over the covered x-span.
            if mono == "i":
                xspan_result = scipy.integrate.quad(
                    lambda x: max(0.0, self.interp(x, selec, mono=mono)) *
                    condv_rv.pdf(x), max(xvals[0], domain[0]),
                    min(xvals[-1], domain[1]))[0]
            else:
                xspan_result = scipy.integrate.quad(
                    lambda x: max(0.0, self.interp(x, selec, mono=mono)) *
                    condv_rv.pdf(x), max(xvals[-1], domain[0]),
                    min(xvals[0], domain[1]))[0]
            x1 = 0
        elif ftype == "stair":
            if mono == "i":
                #if hold="y": reverse same below
                xspan_result = np.sum([
                    condv_rv.cdf_b(xvals[i], xvals[i + 1]) * yvals[i + 1]
                    for i in range(len(xvals) - 1)
                ])
                x1 = [
                    condv_rv.cdf_b(xvals[i], xvals[i + 1]) * yvals[i + 1]
                    for i in range(len(xvals) - 1)
                ]
            else:
                #above: ordered according to x, but as we want to have a boundary for not really decreasing/increasing functions resp.
                #[xvals,yvals]=[xvals[::-1],yvals[::-1]]
                xspan_result = np.sum([
                    condv_rv.cdf_b(xvals[i], xvals[i + 1]) * yvals[i]
                    for i in range(len(xvals) - 1)
                ])
                x1 = [
                    condv_rv.cdf_b(xvals[i], xvals[i + 1]) * yvals[i]
                    for i in range(len(xvals) - 1)
                ]
        else:
            # Fail fast with a clear message instead of hitting an
            # UnboundLocalError on xspan_result below.
            raise ValueError('ftype must be "interp" or "stair", got %r' %
                             (ftype, ))

        result = outxspan_lower + xspan_result + outxspan_upper

        return ([result, outxspan_lower, outxspan_upper, x1])
Пример #5
0
    def regresult(self,
                  mx,
                  vx,
                  rtype,
                  n_knots=100,
                  boundary_dist=5,
                  smooth_k=3,
                  startres=0,
                  endres=None):
        '''
        ###################################################################
        Description:

        Compute the failure probability by regression, applying smoothing
        splines on the data points of several susi results.
        Most derivations are performed in function self.regspline.

        ###################################################################
        Parameters:

        mx: float

            mean value of the dynamic variable

        vx: positive float

            coefficient of variation of the dynamic variable

        rtype: str in {"n","ln","e","u","g"}

            distribution type of the dynamic variable
            {"n": normal, "ln": lognormal, "e": exponential,
            "u": uniform, "g": Gumbel}

        n_knots: positive integer, default=100

            number of knots for building the smoothing spline for regression

        boundary_dist: positive integer, default=5

            to avoid taking knots right at the end points, select a distance

        smooth_k: k in LSQUnivariateSpline

        startres: integer, default=0

            select index of first result in self.results that is used for
            regression

        endres: integer, default=None (meaning len(self.results))

            select index of last result in self.results that is used for
            regression

            e.g.
                startres+10 means we use the grid points of 10 results

        ###################################################################
        Returns:

        estimated failure probability by regression
        '''
        # Random variable of the dynamic variable in the internal converted
        # representation (provides the pdf used in the integrand).
        condv_rv = props.attr(name="check_xk", rtype=rtype, mx=mx,
                              vx=vx).get_convert()
        if endres is None:  # identity comparison per PEP 8, not "== None"
            endres = len(self.results)
        [rsp, datapoints] = self.regspline(n_knots, boundary_dist, smooth_k,
                                           startres, endres)
        # Integrate spline * pdf over the span of the regression data points;
        # negative spline values are clipped to zero.
        r1 = scipy.integrate.quad(lambda x: max(0.0, rsp(x)) * condv_rv.pdf(x),
                                  datapoints[0, 0], datapoints[-1, 0])
        return (r1[0])