def load_bridge_data():
    """Preload bridge damage data from external files."""

    global ClassificationDamageParams, EQCoefficients

    # check dictionaries - if empty, load from file
    if ClassificationDamageParams is None:
        eqrm_dir = determine_eqrm_path(__file__)
        path = os.path.join(eqrm_dir, DataFilePath,
                            ClassificationDamageParamsCSVFile)
        ClassificationDamageParams = csvi.csv2rowdict(path,
                                        columns=['CLASS', 'K3D', 'Ishape',
                                                 'Slight', 'Moderate',
                                                 'Extensive', 'Complete'],
                                        convert={'Ishape': int,
                                                 'Slight': float,
                                                 'Moderate': float,
                                                 'Extensive': float,
                                                 'Complete': float})
    if EQCoefficients is None:
        eqrm_dir = determine_eqrm_path(__file__)
        path = os.path.join(eqrm_dir, DataFilePath, EQCoefficientsCSVFile)
        EQCoefficients = csvi.csv2rowdict(path,
                                          columns=['Equation','A','B'],
                                          convert={'A': float, 'B': int})
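
A minimal usage sketch of the preload-and-cache pattern above. Assumptions:
csvi.csv2rowdict keys each row by the first listed column ('CLASS') and
returns the remaining converted columns as a sequence; 'HWB1' is a
hypothetical classification string, not taken from the data file.

def get_bridge_damage_params(bridge_class):
    """Look up the damage parameters for one bridge classification."""
    load_bridge_data()   # idempotent: reads the CSVs once, then hits the cache
    return ClassificationDamageParams[bridge_class]

# k3d, ishape, slight, moderate, extensive, complete = \
#     get_bridge_damage_params('HWB1')
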
Example #2
def get_sites_from_dic(attribute_dic=None,
                       buildings_usage_classification='HAZUS'):
    handle, file_name = tempfile.mkstemp('.csv', 'test_struct_')
    os.close(handle)
    file_name, attribute_dic = write_test_file(file_name, attribute_dic)

    eqrm_dir = determine_eqrm_path()

    data_dir = os.path.join(eqrm_dir, 'resources', 'data')

    # Build lookup table for building parameters
    building_classification_tag = ''
    damage_extent_tag = ''

    default_input_dir = os.path.join(eqrm_dir, 'resources', 'data')
    sites = Structures.from_csv(
        file_name,
        building_classification_tag,
        damage_extent_tag,
        default_input_dir,
        eqrm_dir=eqrm_dir,
        buildings_usage_classification=buildings_usage_classification)
    os.remove(file_name)
    return sites, attribute_dic
Example #3
    def test_cadel_ground_motion(self):

        eqrm_dir = determine_eqrm_path()
        cadel_dir = join(eqrm_dir, "..", "test_cadell", "Cadell")
        natcadell_loc = join(cadel_dir, "natcadell.csv")

        # Silently return from the test if the data set does not exist.
        # The data is in python_eqrm
        if not exists(natcadell_loc):
            return
        default_input_dir = join(eqrm_dir, "resources", "data", "")
        sites = Structures.from_csv(
            natcadell_loc, "", "", default_input_dir, eqrm_dir=eqrm_dir, buildings_usage_classification="FCB"
        )

        magnitudes = array([7.2])

        cadell_periods_loc = join(cadel_dir, "Cadell_periods.csv")
        periods = csv.reader(open(cadell_periods_loc)).next()
        periods = array([float(v) for v in periods])
        num_periods = len(periods)
        num_sites = len(sites.latitude)

        assert allclose(periods, [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5])

        cadell_gm_loc = join(cadel_dir, "Cadell_ground_motions_precision.csv")
        SA = self.ground_motions_from_csv(cadell_gm_loc, num_periods, num_sites)
        # SA = SA[0:1,...]

        # set up damage model
        csm_use_variability = None
        csm_standard_deviation = None
        damage_model = Damage_model(sites, SA, periods, magnitudes, csm_use_variability, csm_standard_deviation)
        damage_model.csm_use_variability = False
        damage_model.csm_standard_deviation = 0.3

        point = damage_model.get_building_displacement()
        # point is SA,SD

        # SA should be of shape
        # (number of buildings,number of events,number of samples).
        # print point[0].shape

        # check that SA is the right shape
        assert point[0].shape == (num_sites, 1)

        # check that SD is the same shape as SA
        assert point[1].shape == point[0].shape

        point = (point[0][..., 0], point[1][..., 0])
        # collapse out sample dimension so it matches the shape of matlab

        cadell_bd_loc = join(cadel_dir, "Cadell_building_displacements.csv")
        matlab_point = open(cadell_bd_loc)
        matlab_point = array([[float(p) for p in mpoint.split(",")] for mpoint in matlab_point])
        matlab_point = (matlab_point[:, 1], matlab_point[:, 0])
        # check that we are within 0.5% of matlab's SA and SD
        assert allclose(point, matlab_point, 5e-3)
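
The point[0][..., 0] step above drops the trailing sample axis; a tiny numpy
illustration of that collapse:

import numpy
sa = numpy.zeros((5, 1))           # shape (num_sites, num_samples), one sample
assert sa[..., 0].shape == (5,)    # trailing axis removed, matching the matlab data
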
Example #5
def load_external_data(fp):
    """Load mean and sigma values from an external file.

    Data is placed in a numpy array where row number is the state
    number and the columns are the ppf() values for each fp value.

    That is, if the source data file contains:
        State,StateIndex,Mean,Sigma
        none,0,0.0,0.00001
        slight,1,0.6,0.6
        moderate,2,2.5,2.7
        extensive,3,75.0,42.0
        complete,4,230.0,110.0
    and the fp value is [10, 20] then the resulting array will be:
           fp(10) fp(20)
            vvv   vvv
        [[    0     0    ]	<- state 0 (none)
         [    1     1    ]	<- state 1 (slight)
         [    1     2    ]	<- state 2 (moderate)
         [   40    53    ]	<- state 3 (extensive)
         [  138   173    ]]	<- state 4 (complete)

    All 'none' state days will be 0 days.  All other states have values
    computed with ceil() and if less than 1 changed to 1.
    """

    eqrm_dir = determine_eqrm_path(__file__)
    path = os.path.join(eqrm_dir,
                        eqrm_filesystem.Resources_Data_Path,
                        MeanSigmaValuesCSVFile)
    rowdict = csvi.csv2rowdict(path,
                               columns=['StateIndex', 'Mean', 'Sigma'],
                               convert={'StateIndex': int,
                                        'Mean': float,
                                        'Sigma': float})

    data = []
    for i in range(5):
        (mean, sigma) = rowdict[str(i)]
        data.append(scipy.stats.norm.ppf(fp, mean, sigma))
    data = numpy.array(data)

    # 'normalise' results: clip everything below 1 (negatives and small
    # fractions alike) up to 1, then round up to whole days
    result = numpy.ceil(data.clip(1.0, 99999999999.0))
    result[0, :] = 0.0      # force all 'none' state times to 0

    return result
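
A self-contained sketch of the ppf -> clip -> ceil pipeline above. Note that
scipy.stats.norm.ppf expects probabilities in (0, 1), so the docstring's
fp = [10, 20] presumably gets rescaled upstream; probability fractions
[0.2, 0.3] reproduce the docstring's table and are assumed here.

import numpy
import scipy.stats

# (mean, sigma) per damage state, copied from the docstring's source table
states = [(0.0, 0.00001), (0.6, 0.6), (2.5, 2.7), (75.0, 42.0), (230.0, 110.0)]
fp = [0.2, 0.3]    # assumed probability fractions (see note above)

data = numpy.array([scipy.stats.norm.ppf(fp, m, s) for (m, s) in states])
result = numpy.ceil(data.clip(1.0, 99999999999.0))
result[0, :] = 0.0
# rows 3 and 4 -> [40., 53.] and [138., 173.], matching the docstring table
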
Example #7
    def _test_get_independent_polygons(self):
        from eqrm_code.generation_polygon import polygons_from_xml
        eqrm_dir = determine_eqrm_path()
        file_location = join(eqrm_dir, 'test_resources', 'sample_event.xml')
        polygons, mag_type = polygons_from_xml(file_location,
                                               azi=[180, 180],
                                               dazi=[10, 10],
                                               fault_dip=[30, 30],
                                               fault_width=15,
                                               override_xml=True)
        independent_polygons = get_independent_polygons_obsolete(polygons)
        assert len(independent_polygons) == 3

        # conformance tests; could change if the way area works changes
        assert allclose(independent_polygons[0].area, 80.685560816)
        assert allclose(independent_polygons[1].area, 16.314439184)
        assert allclose(independent_polygons[2].area, 27.919220365)

        # no more conformance tests (these are real ones):
        # every pair of distinct polygons must have zero-area overlap
        for i in range(len(independent_polygons)):
            for j in range(len(independent_polygons)):
                if not i == j:
                    polygoni = independent_polygons[i]
                    polygonj = independent_polygons[j]
                    intersection = polygoni.intersection(polygonj)
                    assert intersection.area == 0

        area1 = 0
        area2 = 0

        for poly in polygons:
            area1 += poly.area
        for poly in independent_polygons:
            area2 += poly.area
        assert not allclose(area1, area2)
        assert allclose(area2,
                        area1 - polygons[0].intersection(polygons[1]).area)
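
The nested i/j loop above checks pairwise disjointness; an equivalent helper
over unordered pairs, assuming the polygon objects expose shapely-style
intersection() and area:

import itertools

def assert_pairwise_disjoint(polys):
    """Assert that every pair of distinct polygons has zero-area overlap."""
    for poly_a, poly_b in itertools.combinations(polys, 2):
        assert poly_a.intersection(poly_b).area == 0
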
Example #8
    def from_csv(cls,
                 sites_filename,
                 building_classification_tag,
                 damage_extent_tag,
                 default_input_dir,
                 input_dir=None,
                 eqrm_dir=None,
                 buildings_usage_classification='HAZUS',
                 use_refined_btypes=True,
                 force_btype_flag=False,
                 determ_btype=None,
                 determ_buse=None,
                 loss_aus_contents=0):
        """Read structures data from a file.
        Extract structure parameters from building_parameters_table.

        parameters
          sites_filename is actually a file handle

            - some parameters depend on structure classification (hazus or
              refined hazus, depending on use_refined_btypes)

            - some parameters depend on usage (fcb or hazus, depending on
              use_fcb_usage)

            - force_btype_flag will force all buildings to be of type
              determ_btype, and usage determ_buse.

              See page 11 of the tech manual to understand the flags

              refined_btypes = Edwards building classification
        """

        if eqrm_dir is None:
            eqrm_dir = determine_eqrm_path(__file__)

        if force_btype_flag:
            raise NotImplementedError
        sites_dict = csv_to_arrays(sites_filename, **attribute_conversions)

        latitude = sites_dict.pop("LATITUDE")
        longitude = sites_dict.pop("LONGITUDE")

        # create copy of attributes
        attributes = copy.copy(sites_dict)

        # Do we use refined_btypes or hazus btypes?
        if not use_refined_btypes:
            attributes['STRUCTURE_CLASSIFICATION'] = \
                attributes['HAZUS_STRUCTURE_CLASSIFICATION']

        # building_parameters_table holds a lot of info read in from
        # various files in resources/data.
        # It is a dict where the key signifies what the info is about, e.g.
        # height, ductility.  The index of the data represents the structure
        # classification, which is stored in
        # building_parameters_table['structure_classification']
        building_parameters_table = \
            building_params_from_csv(building_classification_tag,
                                     damage_extent_tag,
                                     default_input_dir=default_input_dir,
                                     input_dir=input_dir)

        # get index that maps attributes ->
        #    building_parameters_table (joined by 'structure_classification')
        structure_classification = attributes['STRUCTURE_CLASSIFICATION']
        building_parameter_index = \
            get_index(
                building_parameters_table['structure_classification'],
                structure_classification)

        # Now extract building_parameters from the table,
        # using building_parameter_index
        building_parameters = {}
        for key in building_parameters_table.keys():
            building_parameters[key] = \
                building_parameters_table[key][building_parameter_index]

        # Get non-structural drift thresholds (depending on whether or not
        # they are residential)
        # extract usage dependent parameters
        if buildings_usage_classification == 'HAZUS':
            usages = attributes['HAZUS_USAGE']
            is_residential = (array([(usage[0:4] in ['RES1', 'RES2', 'RES3'])
                                     for usage in usages]))
        elif buildings_usage_classification == 'FCB':
            usages = attributes['FCB_USAGE']
            is_residential = ((usages <= 113) + (usages == 131))
        else:
            msg = ('b_usage_type_flat = ' + str(buildings_usage_classification)
                   + " not 'FCB' or 'HAZUS'")
            raise ValueError(msg)

        is_residential.shape = (-1, 1)  # reshape so it can be broadcast
        nr = building_parameters['non_residential_drift_threshold']
        r = building_parameters['residential_drift_threshold']
        drift_threshold = r * is_residential + nr * (1 - is_residential)
        building_parameters['drift_threshold'] = drift_threshold

        is_residential.shape = (-1,)
        if loss_aus_contents == 1:
            # reduce residential contents to 60%; non-residential stay at 100%
            attributes['CONTENTS_COST_DENSITY'] *= \
                (is_residential * 0.6 + (1 - is_residential) * 1.0)

        rcp = build_replacement_ratios(usages, buildings_usage_classification)
        building_parameters['structure_ratio'] = rcp['structural']
        building_parameters['nsd_d_ratio'] = \
            rcp['nonstructural drift sensitive']
        building_parameters['nsd_a_ratio'] = \
            rcp['nonstructural acceleration sensitive']

        # create structures:
        return cls(latitude, longitude, building_parameters, **attributes)
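
The get_index() join above maps each site's classification string onto a row
of the parameter table; a minimal numpy sketch of that pattern (all names and
values are illustrative, not the EQRM get_index implementation):

import numpy

table_classes = numpy.array(['RM2L', 'URML', 'W1'])   # one row per class
table_height = numpy.array([7.0, 4.5, 3.0])           # hypothetical parameter
site_classes = numpy.array(['W1', 'RM2L', 'W1'])      # one entry per site

# class -> row index, then gather one table row per site
row_of = dict((c, i) for (i, c) in enumerate(table_classes))
index = numpy.array([row_of[c] for c in site_classes])
heights_per_site = table_height[index]                # array([ 3.,  7.,  7.])
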
Example #9
def build_replacement_ratios(usage_per_struct, buildings_usage_classification):
    """Return an array of the building components replacement cost ratios.
    shape (# of buildings, 3 (# of components in a structure))

    A structure has 3 components;
      structural
      nonstructural acceleration sensitive
      nonstructural drift sensitive

    The ratio of the replacement cost for each component differs for
    different usage types.

    Also, there are two scales of usage types;
      Functional classification of buildings (FCB)(ABS usage)
      HAZUS usage

    Parameters:
      usage_per_struct: An array, shape (# of buildings) of usage
        values for each structure.
      buildings_usage_classification: The usage scale used.

    Return:
      A dictionary of cost ratios for the structures.
      The keys are the structural components;
        'structural', 'nonstructural drift sensitive',
        'nonstructural acceleration sensitive'

    Note: This should only be used once, since it is loading a file.
    """

    usage_column = 'Functional classification of buildings (ABS usage)'
    components = ['structural', 'nonstructural drift sensitive',
                  'nonstructural acceleration sensitive']
    convert = {}

    # extract usage dependent parameters
    if buildings_usage_classification == 'HAZUS':  # HAZUS_USAGE
        file_name = 'replacement_cost_ratios_wrt_HAZUS_Usage.csv'
    elif buildings_usage_classification == 'FCB':  # FCB_USAGE
        file_name = 'replacement_cost_ratios_wrt_FCB_Usage.csv'
        convert[usage_column] = int
    else:
        msg = ('b_usage_type_flat = ' + str(buildings_usage_classification)
               + ' not "FCB" or "HAZUS"')
        raise ValueError(msg)

    for comp in components:
        convert[comp] = float

    root_dir = determine_eqrm_path()
    data_dir = join(root_dir, 'resources', 'data')
    (att_dic, _) = csv2dict(join(data_dir, file_name), convert=convert)

    # This way does not have an assert
    # Build a dict with
    # key (usage, component), value = cost ratio
#     cost_ratios = {}
#     for comp in components:
#         tmp = {}
#         for i,usage in enumerate(att_dic[usage_column]):
#             tmp[usage] = att_dic[comp][i]
#         cost_ratios[comp] = tmp

#     replacement_cost_ratios = {}
#     for comp in components:
#         this_cost_ratios = cost_ratios[comp]
#         tmp = [this_cost_ratios[usage] for usage in usage_per_struct]
#         replacement_cost_ratios[comp] = array(tmp)

    cost_ratios = {}
    for (i, usage) in enumerate(att_dic[usage_column]):
        cost_ratios[usage] = [att_dic[components[0]][i],
                              att_dic[components[1]][i],
                              att_dic[components[2]][i]]

    rcp_array = asarray([cost_ratios[usage] for usage in usage_per_struct])
    # each row sums to 1.0, so the grand total equals the number of structures
    assert allclose(rcp_array.sum(), rcp_array.shape[0], 0.0000001)

    replacement_cost_ratios = {}
    for (i, comp) in enumerate(components):
        replacement_cost_ratios[comp] = rcp_array[:, i]

    return replacement_cost_ratios
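
The assert above relies on an invariant: each structure's three component
ratios sum to 1.0, so the grand total equals the number of structures. A toy
check with hypothetical ratios:

import numpy

rcp_array = numpy.array([[0.2, 0.4, 0.4],    # structure 1: row sums to 1.0
                         [0.3, 0.3, 0.4]])   # structure 2: row sums to 1.0
assert numpy.allclose(rcp_array.sum(), rcp_array.shape[0])
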
Example #10
    if mini_check_scenarios:
        # mini_check_scenarios is not packaged up with the distribution
        if verbose:
            print "Start mini_check_scenarios.py"
        chdir(eqrm_root_dir)
        retcode = run_call('mini_check_scenarios.py', eqrm_root_dir,
                           python_command=python_command)
        if retcode != 0:
            print 'ERROR: mini_check_scenarios.py failed.'
            stop_message()
            return False

    if demo_batchrun:
        if verbose:
            print "Start demo_batchrun.py"
        chdir(join(eqrm_root_dir, 'demo'))
        run_call(join('demo', 'demo_batchrun.py'), eqrm_root_dir,
                 python_command=python_command)
        # No return code, since no comparisons are made

    return True

if __name__ == '__main__':
    do_tests_checks_demos_audits(determine_eqrm_path(),
                                 check_scenarios=False,
                                 ip_audit=True,
                                 test_all=False,
                                 mini_check_scenarios=False,
                                 demo_batchrun=False,
                                 python_command='python')
Example #12
The variables below describe where in the EQRM filesystem
certain parts of the system live.
 
Version: $Revision$  
ModifiedBy: $Author$
ModifiedDate: $Date$

Copyright 2007 by Geoscience Australia

"""

import os

import eqrm_code.util as util

eqrm_path = util.determine_eqrm_path()

# define various paths
Resources_Data_Path = os.path.join(eqrm_path, 'resources', 'data')

demo_path = os.path.join(eqrm_path, 'demo')

Implementation_Path = os.path.join(eqrm_path, 'implementation_tests')

mini_scenario_scenarios_path = os.path.join(eqrm_path, 'implementation_tests',
                                            'mini_scenarios', 'scenarios')

mini_scenario_Path = os.path.join(eqrm_path, 'implementation_tests',
                                  'mini_scenarios')

scenario_input_path = os.path.join(eqrm_path, 'implementation_tests', 'input')
Example #13
    def test_building_response(self):
        # Test that building response is the same as matlab

        periods = array([0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45,
                         0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5,
                         1.6, 1.7, 1.8, 1.9, 2, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6,
                         2.7, 2.8, 2.9, 3])

        SA = array([0.017553049, 0.028380350, 0.036142210, 0.037701113,
                    0.039325398, 0.038083417, 0.036880517, 0.036190107,
                    0.035512489, 0.035088679, 0.034669917, 0.033162774,
                    0.030871523, 0.027841184, 0.025094836, 0.022850476,
                    0.021322256, 0.019895084, 0.018562342, 0.017317833,
                    0.016160874, 0.015195155, 0.014287144, 0.013433394,
                    0.012630662, 0.011875899, 0.011166239, 0.010498986,
                    0.009871606, 0.009281717, 0.008727078, 0.008140645,
                    0.007593619, 0.007083352, 0.006607374, 0.006163380])

        SA = SA[newaxis, newaxis, :]

        magnitudes = array([6.5])

        Btype = 'RM2L'

        eqrm_dir = determine_eqrm_path()
        default_input_dir = join(eqrm_dir, 'resources', 'data', '')
        building_parameters = \
            building_params_from_csv(building_classification_tag='',
                                     damage_extent_tag='',
                                     default_input_dir=default_input_dir)

        # Pull the parameters out:
        b_index = where([(bt == Btype) for bt in
                         building_parameters['structure_classification']])
        new_bp = {}
        for key in building_parameters:
            try:
                new_bp[key] = building_parameters[key][b_index]
            except:
                # some entries are not per-classification arrays; keep as-is
                new_bp[key] = building_parameters[key]

        structures = Structures(latitude=[-31], longitude=[150],
                                building_parameters=new_bp,
                                #bridge_parameters={},
                                FCB_USAGE=array([111]),
                                STRUCTURE_CLASSIFICATION=array([Btype]),
                                STRUCTURE_CATEGORY=array(['BUILDING']))
        building_parameters = structures.building_parameters

        # All the same for this type anyway
        csm_use_variability = None
        csm_standard_deviation = None
        damage_model = Damage_model(structures, SA, periods, magnitudes,
                                    csm_use_variability, csm_standard_deviation)

        # set up the capacity model
        capacity_spectrum_model = Capacity_spectrum_model(periods, magnitudes,
                                                          building_parameters)

        capacity_spectrum_model.smooth_damping = True
        capacity_spectrum_model.use_displacement_corner_period = True
        capacity_spectrum_model.damp_corner_periods = True
        capacity_spectrum_model.use_exact_area = True
        capacity_spectrum_model.rtol = 0.01
        capacity_spectrum_model.csm_damping_max_iterations = 7

        ###########################################################

        damage_model.capacity_spectrum_model = capacity_spectrum_model

        # Warning, point is not used
        point = damage_model.get_building_displacement()

        # matlab values
        SAcr = 0.032208873
        SDcr = 0.97944026
        assert allclose(point[0], SAcr)
        assert allclose(point[1], SDcr)
        assert allclose(point, [[[SAcr]], [[SDcr]]])
Example #17
def obsolete_convert_Py2Mat_Risk(site_tag,
                                 datadir,
                                 param_t,
                                 default_data=None):
    """Get python data as attributes of object.

    site_tag     Site Location (?) should get from PARAM_T (whatever *that* is)
    datadir      path to directory containing data
    param_t      name of the PARAM_T file
    default_data path to default data directory

    Returns a data object.  See module docstring.
    """

    # get EQRM root path
    eqrm_path = util.determine_eqrm_path()

    # handle missing default_data
    if default_data is None:
        default_data = os.path.join(eqrm_path,
                                    eqrm_filesystem.Resources_Data_Path)

    # easy stuff
    saved_ecbval2_file = os.path.join(
        eqrm_path, eqrm_filesystem.Demo_Output_ProbRisk_Path,
        site_tag + '_bval.txt')
    saved_ecbval2 = load_data(saved_ecbval2_file)
    saved_ecbval2 = num.array(saved_ecbval2).flat[:]  # make a row vector

    saved_rjb_file = os.path.join(eqrm_path,
                                  eqrm_filesystem.Demo_Output_ProbRisk_Path,
                                  site_tag + '_distance_rjb.txt')
    saved_rjb = load_data(saved_rjb_file)
    saved_rjb = saved_rjb.transpose()

    # get loss values - note: we only do contents+building
    saved_ecloss_file = os.path.join(eqrm_path,
                                     eqrm_filesystem.Demo_Output_ProbRisk_Path,
                                     site_tag + '_total_building_loss.txt')
    saved_ecloss = load_data(saved_ecloss_file)
    saved_ecloss = saved_ecloss[1:]  # strip building ID row
    saved_ecloss = saved_ecloss.transpose()

    # get aus_mag and nu data
    tmp1_file = os.path.join(eqrm_path,
                             eqrm_filesystem.Demo_Output_ProbRisk_Path,
                             site_tag + '_event_set.txt')
    tmp1 = load_data(tmp1_file)
    nu = tmp1[:, 8]
    aus_mag = tmp1[:, 9]

    # structure information
    saved_struct_file = os.path.join(eqrm_path,
                                     eqrm_filesystem.Demo_Output_ProbRisk_Path,
                                     site_tag + '_structures.txt')
    print('saved_struct_file=%s' % saved_struct_file)
    saved_struct = load_struct_data(saved_struct_file)
    # convert certain fields in-situ
    convert_btype_to_btypeindex(saved_struct, default_data)
    convert_uses_to_usesindex(saved_struct, default_data)
    convert_suburb_to_suburbindex(saved_struct, default_data)

    # create result object, add data attributes
    result = Result()

    result.ecbval2 = num.array(saved_ecbval2)
    result.ecloss = num.array(saved_ecloss)
    result.rjb = num.array(saved_rjb)
    result.aus_mag = num.array(aus_mag)
    result.nu = num.array(nu)

    # convert structures array to lists, add to data attributes
    result.latitude = num.array([x[0] for x in saved_struct])
    result.longitude = num.array([x[1] for x in saved_struct])
    result.pre1989 = [x[2] for x in saved_struct]
    result.postcode = num.array([x[3] for x in saved_struct])
    result.site_class = [x[4] for x in saved_struct]
    result.suburb = [x[5] for x in saved_struct]
    result.survey_factor = num.array([x[6] for x in saved_struct])
    result.structure_classification = [x[7] for x in saved_struct]
    result.hazus_structure_classification = [x[8] for x in saved_struct]
    result.bid = num.array([x[9] for x in saved_struct])
    result.fcb_usage = num.array([x[10] for x in saved_struct])
    result.hazus_usage = [x[11] for x in saved_struct]

    return result
Example #18
def regressionTest(path=None):
    """
    Args:
      path: The directory to look for files.
        Assumed to be eqrm_code.
    Return:
      suite: The test suite to use with the unit tests.
      moduleNames: The names of all the modules.  This
        can be used to import all the modules. Used
        in do_coverage.
    """

    if path is None:
        path = util.determine_eqrm_path()

    print
    if False:  # I never really looked at this info
        print "The following directories will be skipped over;"
        for dir in EXCLUDE_DIRS:
            print dir,
            print "\n"

    # print 'Recursing into;'
    test_files, subdirectories = get_test_files(path)
    files = [x for x in test_files if x != 'test_all.py']
    print 'Testing path %s:' % path
    if False:  # I never really looked at this info
        print
        print 'Files tested;'
        #print_files = []
        for file in files:
            #print_files += file + ' '
            print file + ',',
        print
    print
    if 'EXCLUDE_FILES' in globals():
        for file in EXCLUDE_FILES:
            print 'WARNING: File ' + file + ' to be excluded from testing'
            try:
                files.remove(file)
            except ValueError as e:
                msg = 'File "%s" was not found in test suite.\n' % file
                msg += 'Original error is "%s"\n' % e
                msg += 'Perhaps it should be removed from exclude list?'
                raise Exception(msg)

    filenameToModuleName = lambda f: os.path.splitext(f)[0]
    moduleNames = map(filenameToModuleName, files)
    # print "moduleNames", moduleNames
    # Note, if there are duplicate file names from different directories,
    # only one of them will be used.
    # Add the directories to sys.path so the imports work.
    for subdir in subdirectories:
        sys.path.append(subdir)
    sys.path.append(path)
    modules = map(__import__, moduleNames)
    # Fix up the system path
    for subdir in subdirectories:
        sys.path.remove(subdir)
    load = unittest.defaultTestLoader.loadTestsFromModule
    testCaseClasses = map(load, modules)
    return unittest.TestSuite(testCaseClasses), moduleNames
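
A hedged usage sketch: the returned suite plugs into a standard unittest
runner (the runner and verbosity are illustrative choices, not part of the
EQRM test driver):

import unittest

def run_regression(path=None):
    """Build the suite via regressionTest() and run it."""
    suite, module_names = regressionTest(path)
    return unittest.TextTestRunner(verbosity=1).run(suite)
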