Example #1
def calc_pml_fatalities(fatalities, nu, return_period=None):
    """Compute the probable maximum loss fatalities (PMLf) curve for a standard
    probabilistic EQRM risk run.

    Inputs:
    fatalities     Vector containing the fatality estimates (in numbers) for
                   each event, with the corresponding nu. Note that this can be
                   the aggregated fatalities across all sites or simply the
                   fatalities at a specific site.
    nu             1D array containing the event activity of each of the
                   simulated events.
    return_period  Provide a single return period if using this function to
                   compute exceedance fatalities for a desired return period.
                   If None, a sample of 25 log-spaced return periods from
                   10 to 1e6 years is created.

    Returns:
    pmlf_data      List of two arrays describing the PMLf curve. The first
                   array contains the probability of exceedance (in one year)
                   values and the second contains the fatality levels for each
                   of those probabilities of exceedance.
    """

    # Define return periods of interest and return *rates*
    if return_period is None:
        return_periods = num.logspace(1, 6, 25)
    else:
        return_periods = return_period
    rtrn_rte = 1. / return_periods

    # acquire_riskval returns a 3-tuple; only the first element (the fatality
    # levels at the target exceedance rates) is needed here
    (trghzd_agg, _, _) = calc_annloss.acquire_riskval(fatalities, nu, rtrn_rte)

    # convert return rates to probability of exceedance in one year
    # (Poisson assumption: P = 1 - exp(-rate))
    ProbExceedSmall = 1 - num.exp(-rtrn_rte)

    return [ProbExceedSmall, trghzd_agg]
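
A minimal usage sketch, assuming num is numpy and that the EQRM calc_annloss module (providing acquire_riskval) is importable; the fatality and event-activity values below are synthetic illustration data only.

import numpy as num

fatalities = num.array([12.0, 0.5, 150.0])   # fatalities per simulated event
nu = num.array([1e-3, 5e-3, 1e-4])           # event activity (events/year)

# full PMLf curve over 25 log-spaced return periods (10 to 1e6 years)
prob_exceed, pml_fatalities = calc_pml_fatalities(fatalities, nu)

# a single return period, e.g. the 500-year exceedance fatalities;
# the matching probability is 1 - exp(-1/500), roughly 0.002
prob_500, fat_500 = calc_pml_fatalities(fatalities, nu,
                                        return_period=num.array([500.0]))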
Example #2
def calc_pml(saved_ecloss, saved_ecbval2, nu):
    """Compute the probable maximum loss (PML) curve for a standard
    probabilistic EQRM risk run.

    Inputs:
    saved_ecloss   2D array containing the damage estimates in dollars for
                   each building, multiplied by the survey factor. Note that 
                   the matrix has one row for each simulated event and one
                   column for each building.
    saved_ecbval2  1D row array containing the value of each building,
                   multiplied by the survey factor.
    nu             1D array containing the event activity of each of the
                   simulated events.

    Returns:
    pml_data       (nx3) array containing the PML curve. The first column
                   contains the probability of exceedance (in one year) values,
                   the second column contains the direct financial losses
                   for each of the probabilities of exceedance and the third 
                   column contains the financial losses as a percentage of the 
                   total building value.
    """

    # get total building value
    TotalBVal2 = num.sum(saved_ecbval2)
    AggEcLoss = num.sum(saved_ecloss, axis=0)  # sum rows

    # Define return periods of interest and return *rates*
    return_periods = num.logspace(1, 6, 25)
    rtrn_rte = 1. / return_periods

    # acquire_riskval returns a 3-tuple; only the first element (the aggregated
    # loss at the target exceedance rates) is needed here
    (trghzd_agg, _, _) = calc_annloss.acquire_riskval(AggEcLoss, nu, rtrn_rte)

    # convert return rates to probability of exceedance in one year
    ProbExceedSmall = 1 - num.exp(-rtrn_rte)

    return [ProbExceedSmall, trghzd_agg, trghzd_agg / TotalBVal2 * 100]
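
A minimal usage sketch for calc_pml under the same assumptions (num is numpy, the EQRM calc_annloss module is importable); the loss matrix, building values and event activities are synthetic.

import numpy as num

# two buildings (columns), three simulated events (rows)
saved_ecloss = num.array([[1.0e5, 2.0e5],
                          [5.0e4, 0.0],
                          [2.5e5, 3.0e5]])    # damage estimates ($)
saved_ecbval2 = num.array([1.0e6, 2.0e6])     # building values ($)
nu = num.array([1e-3, 5e-3, 1e-4])            # event activity (events/year)

prob_exceed, loss, loss_pct = calc_pml(saved_ecloss, saved_ecbval2, nu)
# prob_exceed[k] = 1 - exp(-1/T_k) for each of the 25 log-spaced return
# periods T_k; loss_pct is the loss as a percentage of the total building
# value (3.0e6 here).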
Example #3
def calc_annfatalities_deagg_distmag(saved_fatalities, nu, saved_rjb,
                               aus_mag, momag_bin, R_bin, Zlim,
                               R_extend_flag=False):
    """Calculate the % of annualised fatalities disaggregated by mag and distance.

    It is the annualised fatalities disaggregated by mag and distance/ total
    annualised fatalities, as a percentage.

    Inputs:
    saved_fatalities    fatalities, dimensions(event, structure)
    nu              event activity, dimensions(event)
    saved_rjb       rjb distances array, dimensions(event, structure)
    aus_mag         earthquake magnitudes, dimensions(event)
    momag_bin       1xn array of bounds for the moment magnitude bins, e.g.
                       momag_bin = [4.5, 5.0, 5.5, 6.0, 6.5]
                    Note that the value 1e-13 is added to the last entry in
                    momag_bin so that values equal to momag_bin[-1] are
                    captured, i.e. the example above becomes
                       momag_bin = [4.5, 5.0, 5.5, 6.0, 6.5000000000001]
    R_bin           1xm list containing bounds for the distance bins, e.g.
                       R_bin = [0, 5, 10, ..., 100]
                    Note that if R_extend_flag is True, R_bin is extended by
                    one large catch-all element so that all values greater
                    than the last bound fall in the final bin.
    R_extend_flag   Whether to extend the last R_bin bin to catch overflow:
                       True  => extend R_bin (see R_bin)
                       False => do not extend R_bin
    Zlim            1x2 array of z-axis limits (not used in this calculation).

    Returns the normalised deaggregated fatalities, dimensions(mag_bin, dist_bin)

    """

    # convert all 'array' data to real arrays
    saved_fatalities = num.array(saved_fatalities)
    nu = num.array(nu)
    saved_rjb = num.array(saved_rjb)
    aus_mag = num.array(aus_mag)
    momag_bin = num.array(momag_bin)     # make sure we don't have a tuple

    # Verify the array shapes
    events = aus_mag.shape[0]
    assert nu.shape == (events,)
    
    
    # get total annualised fatalities
    (ann_fatalities, _) = calc_annloss.calc_annfatalities(saved_fatalities, nu)
    
    # Setup moment magnitude bins
    # nudge the last bound up so values equal to momag_bin[-1] fall in the last bin
    momag_bin[-1] = momag_bin[-1] + 1e-13
    #momag_centroid = momag_bin[:-1] + num.diff(momag_bin)/2;
    #if len(momag_centroid) >= 3:
    #    momag_centroid[-1] = momag_bin[-2] + (momag_bin[-2] - momag_bin[-3])/2

    # Setup distance bins (doesn't necessarily need to be Joyner-Boore distance)
    Rjb_bin = R_bin[:]
    if R_extend_flag:
        Rjb_bin.append(10000)

    #Rjb_centroid = Rjb_bin[:-1] + num.diff(Rjb_bin)/2.0;
    #Rjb_centroid[-1] = Rjb_bin[-2] + (Rjb_bin[-2] - Rjb_bin[-3])/2.0

    mLength = len(momag_bin)
    RLength = len(Rjb_bin)

    # prepare result array for deaggregated annualised fatalities
    DeAggFatalities = num.zeros((mLength-1, RLength-1))
    
    for i in range(1, mLength):
## TODO: convert to log()
##        print('Now aggregating loss for magnitude greater than '
##              '%.2f and less than %.2f' % (momag_bin[i-1], momag_bin[i]))
        
        # find events whose magnitude falls in this magnitude bin
        mInd = num.nonzero(num.logical_and(momag_bin[i-1] <= aus_mag,
                                           aus_mag < momag_bin[i]))[0]
        subSaved_fatalities = num.take(saved_fatalities, mInd, axis=0)
        sliced_rjb = num.take(saved_rjb, mInd, axis=0)

        for j in range(1, RLength):
            FatalitiesMatrix = num.zeros(num.shape(subSaved_fatalities))
            
            ind = num.logical_and((Rjb_bin[j-1] <= sliced_rjb),
                                  (sliced_rjb < Rjb_bin[j])).nonzero()
            FatalitiesMatrix[ind] = subSaved_fatalities[ind]
            TempAggFatalities = num.sum(FatalitiesMatrix, axis=1)
            (_, TempPercFatalities, Tempcumnu_fatalities) = \
                calc_annloss.acquire_riskval(TempAggFatalities, nu[mInd], 0)
            
            # convert recurrence rates (cumsum(nu)) to
            # prob. of exceed in 1 year
            TempProbExceed = 1 - num.exp(-Tempcumnu_fatalities)
            TempIntPercFatalities = calc_annloss.integrate_backwards(
                TempPercFatalities, TempProbExceed)

            # guard against empty bins (no events in this magnitude bin)
            if len(TempPercFatalities) > 0:
                DeAggFatalities[i-1, j-1] = TempIntPercFatalities[0]
            else:
                DeAggFatalities[i-1, j-1] = 0

    # Normalise the deaggregated fatalities by the total annualised fatalities
    NormDeAggFatalities = 100.0 * DeAggFatalities / ann_fatalities
    
    return NormDeAggFatalities
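
The heart of the double loop above is the two-stage masking: events are first selected by magnitude bin, then the per-site fatalities are kept only where the corresponding rjb distance falls inside the distance bin. A standalone sketch of that step on synthetic arrays (all names and values here are illustrative only):

import numpy as num

# synthetic data: 4 events x 2 sites
fatalities = num.array([[1.0, 0.0],
                        [3.0, 2.0],
                        [0.5, 0.5],
                        [10.0, 4.0]])
rjb = num.array([[12.0, 40.0],
                 [ 3.0,  8.0],
                 [55.0, 60.0],
                 [ 9.0, 22.0]])
mags = num.array([4.7, 5.2, 5.8, 6.3])

mag_lo, mag_hi = 5.0, 5.5       # one magnitude bin
r_lo, r_hi = 0.0, 10.0          # one distance bin

# events whose magnitude falls in the magnitude bin
m_ind = num.nonzero(num.logical_and(mag_lo <= mags, mags < mag_hi))[0]
sub_fat = num.take(fatalities, m_ind, axis=0)
sub_rjb = num.take(rjb, m_ind, axis=0)

# keep only the cells whose distance falls in the distance bin
cell = num.zeros(sub_fat.shape)
ind = num.logical_and(r_lo <= sub_rjb, sub_rjb < r_hi).nonzero()
cell[ind] = sub_fat[ind]
print(num.sum(cell, axis=1))    # aggregated fatalities per selected event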

    
def calc_annloss_deagg_distmag(bldg_value,
                               saved_ecloss,
                               nu,
                               saved_rjb,
                               aus_mag,
                               momag_bin,
                               R_bin,
                               Zlim,
                               R_extend_flag=False):
    """Calculate the % of annualised loss disaggregated by mag and distance.

    It is the annualised loss disaggregated by mag and distance/ total
    annualised loss, as a percentage.

    Inputs:
    bldg_value      value of each building, dimensions(structure)
    saved_ecloss    building loss, dimensions(event, structure)
    nu              event activity, dimensions(event)
    saved_rjb       rjb distances array, dimensions(event, structure)
    aus_mag         earthquake magnitudes, dimensions(event)
    momag_bin       1xn array of bounds for the moment magnitude bins, e.g.
                       momag_bin = [4.5, 5.0, 5.5, 6.0, 6.5]
                    Note that the value 1e-13 is added to the last entry in
                    momag_bin so that values equal to momag_bin[-1] are
                    captured, i.e. the example above becomes
                       momag_bin = [4.5, 5.0, 5.5, 6.0, 6.5000000000001]
    R_bin           1xm list containing bounds for the distance bins, e.g.
                       R_bin = [0, 5, 10, ..., 100]
                    Note that if R_extend_flag is True, R_bin is extended by
                    one large catch-all element so that all values greater
                    than the last bound fall in the final bin.
    R_extend_flag   Whether to extend the last R_bin bin to catch overflow:
                       True  => extend R_bin (see R_bin)
                       False => do not extend R_bin
    Zlim            1x2 array of z-axis limits (not used in this calculation).

    Returns the normalised deaggregated loss, dimensions(mag_bin, dist_bin)

    """

    # convert all 'array' data to real arrays
    bldg_value = num.array(bldg_value)
    saved_ecloss = num.array(saved_ecloss)
    nu = num.array(nu)
    saved_rjb = num.array(saved_rjb)
    aus_mag = num.array(aus_mag)
    momag_bin = num.array(momag_bin)  # make sure we don't have a tuple

    # Verify the array shapes
    events = aus_mag.shape[0]
    structures = bldg_value.shape[0]
    assert saved_ecloss.shape == (events, structures)
    assert nu.shape == (events, )
    assert saved_rjb.shape == (events, structures)

    # get total building value
    tot_bldg_value = num.sum(bldg_value)

    # get annualised loss in $
    ((ann_loss, _), _) = calc_annloss.calc_annloss(saved_ecloss, bldg_value,
                                                   nu)

    # Setup moment magnitude bins
    # nudge the last bound up so values equal to momag_bin[-1] fall in the last bin
    momag_bin[-1] = momag_bin[-1] + 1e-13
    #momag_centroid = momag_bin[:-1] + num.diff(momag_bin)/2;
    #if len(momag_centroid) >= 3:
    #    momag_centroid[-1] = momag_bin[-2] + (momag_bin[-2] - momag_bin[-3])/2

    # Setup distance bins (doesn't necessarily need to be Joyner-Boore distance)
    Rjb_bin = R_bin[:]
    if R_extend_flag:
        Rjb_bin.append(10000)

    #Rjb_centroid = Rjb_bin[:-1] + num.diff(Rjb_bin)/2.0;
    #Rjb_centroid[-1] = Rjb_bin[-2] + (Rjb_bin[-2] - Rjb_bin[-3])/2.0

    mLength = len(momag_bin)
    RLength = len(Rjb_bin)

    # prepare result array for deaggregated annualised loss in $
    DeAggLoss = num.zeros((mLength - 1, RLength - 1))

    for i in range(1, mLength):
        ## TODO: convert to log()
        ##        print('Now aggregating loss for magnitude greater than '
        ##              '%.2f and less than %.2f' % (momag_bin[i-1], momag_bin[i]))

        # find events whose magnitude falls in this magnitude bin
        mInd = num.nonzero(num.logical_and(momag_bin[i - 1] <= aus_mag,
                                           aus_mag < momag_bin[i]))[0]
        subSaved_ecloss = num.take(saved_ecloss, mInd, axis=0)
        sliced_rjb = num.take(saved_rjb, mInd, axis=0)

        for j in range(1, RLength):
            LossMatrix = num.zeros(num.shape(subSaved_ecloss))

            ind = num.logical_and((Rjb_bin[j - 1] <= sliced_rjb),
                                  (sliced_rjb < Rjb_bin[j])).nonzero()
            LossMatrix[ind] = subSaved_ecloss[ind]
            TempAggLoss = num.sum(LossMatrix, axis=1)
            (_, TempPercEcLoss, Tempcumnu_ecloss) = \
                calc_annloss.acquire_riskval(TempAggLoss, nu[mInd], 0)

            # convert recurrence rates (cumsum(nu)) to
            # prob. of exceed in 1 year
            TempProbExceed = 1 - num.exp(-Tempcumnu_ecloss)
            TempIntPercEcLoss = calc_annloss.integrate_backwards(
                TempPercEcLoss, TempProbExceed)

            # guard against empty bins (no events in this magnitude bin),
            # mirroring calc_annfatalities_deagg_distmag above
            if len(TempPercEcLoss) > 0:
                DeAggLoss[i - 1, j - 1] = TempIntPercEcLoss[0]
            else:
                DeAggLoss[i - 1, j - 1] = 0

    # Normalise the Deaggregated loss by the annualised loss in $
    NormDeAggLoss = 100.0 * DeAggLoss / ann_loss

    return NormDeAggLoss
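
A minimal usage sketch for calc_annloss_deagg_distmag, again assuming num is numpy and the EQRM calc_annloss module (calc_annloss, acquire_riskval, integrate_backwards) is importable; all values below are synthetic, and every magnitude bin contains at least one event so that no bin is empty.

import numpy as num

# synthetic portfolio: 4 events x 2 buildings
bldg_value = num.array([1.0e6, 2.0e6])
saved_ecloss = num.array([[1.0e5, 2.0e5],
                          [5.0e4, 0.0],
                          [2.5e5, 3.0e5],
                          [8.0e5, 1.0e6]])
nu = num.array([1e-3, 5e-3, 1e-4, 5e-5])     # event activity (events/year)
saved_rjb = num.array([[12.0, 30.0],
                       [ 5.0,  8.0],
                       [45.0, 60.0],
                       [ 9.0, 22.0]])
aus_mag = num.array([4.7, 5.2, 5.8, 6.3])    # one event per magnitude bin

momag_bin = [4.5, 5.0, 5.5, 6.0, 6.5]
R_bin = list(range(0, 105, 5))               # 0 to 100 in steps of 5
Zlim = [0, 100]                              # z-axis limits (plotting only)

norm_deagg = calc_annloss_deagg_distmag(bldg_value, saved_ecloss, nu,
                                        saved_rjb, aus_mag, momag_bin,
                                        R_bin, Zlim, R_extend_flag=True)
# norm_deagg[m, r] is the percentage of the total annualised loss contributed
# by events in magnitude bin m and distance bin r.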