Example #1
    def from_csv(cls, file, **attribute_conversions):
        """Construct Bridges instance from csv file.

        file                   open file handle for site data
        attribute_conversions  dictionary defining required data from file and
                               format of the data
        use:
            X = Bridges.from_csv('blg_wr.csv', PEOPLE=float,
                                 WALLS=str, ROOF_TYPE=str)
        or:
            d = {'PEOPLE': float, 'WALLS': str, 'ROOF_TYPE': str}
            X = Bridges.from_csv('blg_wr.csv', **d)
        """

        # read in data from file
        bridges_dict = csv_to_arrays(file, **attribute_conversions)

        # remove lat&lon from attributes dictionary
        latitude = bridges_dict.pop("LATITUDE")
        longitude = bridges_dict.pop("LONGITUDE")

        # copy remaining attributes - don't need user changes reflected
        attributes = copy.copy(bridges_dict)

        # call class constructor
        return cls(latitude, longitude, **attributes)
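All of these examples pass keyword arguments mapping column names to conversion callables into csv_to_arrays. The EQRM implementation is not reproduced on this page; the snippet below is only a minimal sketch of what such a helper could look like, assuming it reads the CSV header, converts each requested column, and returns a dict of NumPy arrays keyed by column name. csv_to_arrays_sketch and its behaviour are illustrative, not the library's code.

import csv
import numpy

def csv_to_arrays_sketch(file_or_name, **attribute_conversions):
    """Illustrative stand-in for EQRM's csv_to_arrays.

    Returns a dict mapping each requested column name to a numpy array,
    with every value passed through its conversion callable (float, str, ...).
    """
    # The examples pass either an open handle or a file name, so accept both.
    handle = file_or_name if hasattr(file_or_name, 'read') else open(file_or_name)
    try:
        reader = csv.DictReader(handle)
        columns = dict((name, []) for name in attribute_conversions)
        for row in reader:
            for name, convert in attribute_conversions.items():
                columns[name].append(convert(row[name]))
    finally:
        if handle is not file_or_name:
            handle.close()
    return dict((name, numpy.array(values)) for name, values in columns.items())

# Usage mirroring the docstring above (column names are only examples):
# arrays = csv_to_arrays_sketch('blg_wr.csv', LATITUDE=float, LONGITUDE=float, WALLS=str)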
Example #2
    def from_csv(cls, file, **attribute_conversions):
        """Construct Site instance from csv file.

        file                   open file handle for site data
        attribute_conversions  dictionary defining required data from file and
                               format of the data
        use:
            X = Sites.from_csv('blg_wr.csv', PEOPLE=float,
                               WALLS=str, ROOF_TYPE=str)
        or:
            d = {'PEOPLE': float, 'WALLS': str, 'ROOF_TYPE': str}
            X = Sites.from_csv('blg_wr.csv', **d)
        """

        # force lat & lon - required attributes
        attribute_conversions["LATITUDE"] = float
        attribute_conversions["LONGITUDE"] = float

        # remove VS30 if present - we'll deal with this later
        attribute_conversions.pop('VS30', None)

        # read in data from file
        sites_dict = csv_to_arrays(file, **attribute_conversions)

        # remove lat&lon from attributes dictionary
        latitude = sites_dict.pop("LATITUDE")
        longitude = sites_dict.pop("LONGITUDE")

        # copy remaining attributes - don't need user changes reflected
        attributes = copy.copy(sites_dict)

        # now let's do VS30
        try:
            attribute_conversions['VS30'] = float
            sites_dict = csv_to_arrays(file, **attribute_conversions)
            attributes['Vs30'] = sites_dict['VS30']
        except:
            # If not present in the file, analysis will add the mapping based
            # on site class
            pass

        # call class constructor
        return cls(latitude, longitude, **attributes)
Example #4
def calc_loss_deagg_suburb(bval_path_file, total_building_loss_path_file, site_db_path_file, file_out):
    """ Given EQRM ouput data, produce a csv file showing loss per suburb

    The produced csv file shows total building loss, total building
    value and loss as a percentage.  All of this is shown per suburb.

    bval_path_file - location and name of building value file produced by EQRM
    total_building_loss_path_file - location and name of the total building
      loss file
    site_db_path_file - location and name of the site database file
    file_out - location and name of the csv file to write

    Note: This can be generalised pretty easily, to get results
          deaggregated on other columns of the site_db
    """
    aggregate_on = ["SUBURB"]

    # Load all of the files.
    site = csv_to_arrays(site_db_path_file, **attribute_conversions)
    # print "site", site
    bvals = loadtxt(bval_path_file, dtype=scipy.float64, delimiter=",", skiprows=0)
    # print "bvals", bvals
    # print "len(bvals", len(bvals)

    total_building_loss = loadtxt(total_building_loss_path_file, dtype=scipy.float64, delimiter=" ", skiprows=1)
    # print "total_building_loss", total_building_loss
    # print "total_building_loss shape", total_building_loss.shape
    site_count = len(site["BID"])
    assert site_count == len(bvals)
    assert site_count == total_building_loss.shape[1]
    # For aggregates
    # key is the unique AGGREGATE_ON combination .eg ('Hughes', 2605,...)
    # Values are a list of indices where the combinations are repeated in site
    aggregates = {}
    for i in range(site_count):
        assert site["BID"][i] == int(total_building_loss[0, i])
        marker = []
        for name in aggregate_on:
            marker.append(site[name][i])
        marker = tuple(marker)
        aggregates.setdefault(marker, []).append(i)
    # print "aggregates", aggregates

    handle = csv.writer(open(file_out, "w"), lineterminator="\n")

    handle.writerow(["percent losses (building and content) by suburb"])
    handle.writerow(["suburb", "loss", "value", "percent loss"])
    handle.writerow(["", " ($ millions)", " ($ millions)", ""])
    keys = aggregates.keys()
    keys.sort()
    for key in keys:
        sum_loss = 0
        sum_bval = 0
        for row in aggregates[key]:
            sum_loss += total_building_loss[1][row]
            sum_bval += bvals[row]
        handle.writerow([key[0], sum_loss / 1000000.0, sum_bval / 1000000.0, sum_loss / sum_bval * 100.0])
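A call to the function above might look like the following; the file names are purely illustrative and would come from a specific EQRM run. For a suburb with a summed loss of 2,500,000 and a summed building value of 50,000,000, the output row would read 2.5, 50.0, 5.0 (loss and value in $ millions, then percent loss).

# Hypothetical paths - an actual run supplies its own bval, loss and site db files.
calc_loss_deagg_suburb('newc_bval.csv',
                       'newc_total_building_loss.txt',
                       'sitedb_newc.csv',
                       'loss_by_suburb.csv')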
Example #5
def write_aggregate_read_csv(attribute_dic=None, buildings_usage_classification="HAZUS"):
    handle, file_name = tempfile.mkstemp(".csv", "test_aggregate_csv_")
    os.close(handle)
    file_name, attribute_dic = write_test_file(file_name, attribute_dic)

    attribute_conversions_extended = copy.deepcopy(attribute_conversions)
    attribute_conversions_extended["UFI"] = int
    file_out = file_name + "out"
    aggregate_building_db(file_name, file_out)
    site = csv_to_arrays(file_out, **attribute_conversions_extended)
    os.remove(file_name)
    os.remove(file_out)
    return attribute_dic, site
Example #6
    def test_csv_to_arrays(self):
        (handle, file_name) = tempfile.mkstemp('.csv', 'test_csv_interface_')
        os.close(handle)

        f = open(file_name, "wb")
        f.write('\n'.join(self.dummy_f))
        f.close()

        lon = csvi.csv_to_arrays(file_name, LONGITUDE=float)
        assert lon.keys()[0] == 'LONGITUDE'
        assert len(lon.keys()) == 1
        assert scipy.alltrue(self.LONGITUDE == lon['LONGITUDE'])

        all_conversions = {'LONGITUDE': float, 'LATITUDE': float, 'WALLS': str}

        all = csvi.csv_to_arrays(file_name, **all_conversions)
        assert len(all.keys()) == 3
        assert scipy.alltrue(self.LATITUDE == all['LATITUDE'])
        assert scipy.alltrue(self.LONGITUDE == all['LONGITUDE'])
        assert scipy.alltrue(self.WALLS == all['WALLS'])

        os.remove(file_name)
Example #10
    def from_csv(cls, sites_filename, eqrm_flags):
        """Read structures data from a file.
        Extract structure parameters from building_parameters_table.

        parameters
          sites_filename is actually a file handle
        """

        sites_dict = csv_to_arrays(sites_filename, **attribute_conversions)

        latitude = sites_dict.pop("LATITUDE")
        longitude = sites_dict.pop("LONGITUDE")

        # create copy of attributes
        attributes = copy.copy(sites_dict)

        # Create the vulnerability curves
        vulnerability_set = Vulnerability_Set.from_xml(eqrm_flags)

        # create structures:
        return cls(latitude, longitude, vulnerability_set, **attributes)
Example #11
def get_lat_long_eloss(structure_dir_file, output_dir,
                       total_building_loss_file, out_file):
    """
    From the EQRM input dir load a structure csv, to get the lats and longs.

    From the EQRM output dir load the _total_building_loss.txt file
    to get the building loss.  File format: the first row is comments,
    the second row is building ids and subsequent rows are events.  This
    will just take the first event.

    """

    total_building_loss = loadtxt(path.join(output_dir,
                                            total_building_loss_file),
                                  dtype=scipy.float64,
                                  delimiter=' ',
                                  skiprows=1)
    print "total_building_loss shape", total_building_loss.shape
    # BID is int(total_building_loss[0, i])
    # eloss is total_building_loss[1, i]

    sites_dict = csv_to_arrays(structure_dir_file, **attribute_conversions)
    print "len(sites_dict['BID'])", len(sites_dict['BID'])

    out_h = open(path.join(output_dir, out_file), 'w')
    for i in range(len(sites_dict['BID'])):
        if not sites_dict['BID'][i] == total_building_loss[0, i]:
            print "Don't match ", i
            import sys
            sys.exit()
        else:
            line_list = [
                str(sites_dict['LATITUDE'][i]),
                str(sites_dict['LONGITUDE'][i]),
                str(total_building_loss[1, i])
            ]
            line = ','.join(line_list) + '\n'
            out_h.write(line)
    out_h.close()
Example #13
def aggregate_building_db(sites_filename_in, sites_filename_out=None):
    """
    Aggregate building database data.
    All structures with the same 'POSTCODE',
                'SITE_CLASS',
                'STRUCTURE_CLASSIFICATION',
                'FCB_USAGE',
                'PRE1989',
                'STRUCTURE_CATEGORY',
                'HAZUS_USAGE',
                'SUBURB',
                'HAZUS_STRUCTURE_CLASSIFICATION'

                Are aggregated as one structure.

    These attributes are averaged;
                                  'FLOOR_AREA',
                                  'LATITUDE',
                                  'LONGITUDE',
                                  'CONTENTS_COST_DENSITY',
                                  'BUILDING_COST_DENSITY'

    #FIXME DSG Add a column that gives the BID's (building ID) from the
                                  unaggregated data for each structure

    """
    if sites_filename_out is None:
        sites_filename_out = sites_filename_in

    # if aggregate_on is None:
    aggregate_on = AGGREGATE_ON

    # if average_on is None:
    average_on = AVERAGE_ON

    survey_factor = 'SURVEY_FACTOR'
    ufi_name = 'BID'

    site = csv_to_arrays(sites_filename_in,
                         **attribute_conversions)
    writer = Building_db_writer(sites_filename_out)
    writer.write_header()

    # For aggregates
    # key is the unique AGGREGATE_ON combination .eg ('Hughes', 2605,...)
    # Values are a list of indices where the combinations are repeated in site
    aggregates = {}
    for i in range(len(site['BID'])):
        # print "site['BID'][i]",site['BID'][i]
        marker = []
        for name in aggregate_on:
            marker.append(site[name][i])
        marker = tuple(marker)
        aggregates.setdefault(marker, []).append(i)

    for agg, agg_indexes in aggregates.iteritems():

        # An agg_indexes is a collection of rows, specified by index,
        # to be aggregated
        row_sum = zeros(len(average_on), dtype=float)
        survey_factor_sum = 0.0
        UFI = site[ufi_name][agg_indexes[0]]
        for row in agg_indexes:
            survey_factor_sum += site[survey_factor][row]
            for i, name in enumerate(average_on):
                row_sum[i] += site[name][row] * site[survey_factor][row]
        row_aggregate = row_sum / survey_factor_sum
        writer.write_row(agg, row_aggregate, survey_factor_sum, UFI)
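The two loops above implement a group-by on the tuple key followed by a survey-factor-weighted average. The fragment below shows the same idea on invented numbers; the column values and weights are illustrative only.

import numpy

# Three rows, two of which share the same aggregation key.
keys = [('Hughes', 2605), ('Hughes', 2605), ('Deakin', 2600)]
floor_area = numpy.array([100.0, 300.0, 250.0])   # attribute to average
survey_factor = numpy.array([2.0, 1.0, 1.0])      # weights

aggregates = {}
for i, key in enumerate(keys):
    aggregates.setdefault(key, []).append(i)

for key, rows in aggregates.items():
    weighted_sum = sum(floor_area[r] * survey_factor[r] for r in rows)
    weight = sum(survey_factor[r] for r in rows)
    print(key, weighted_sum / weight)   # ('Hughes', 2605) -> (100*2 + 300*1) / 3 = 166.67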
Example #14
    def test_events_shaking_a_site(self):
        # Parameters
        output_dir = self.dir
        site_tag = 'ernabella'
        site_lat = -31.5
        site_lon = 150.5
        period = 1.0
        soil_amp = False

        # 1. Create and save analysis objects
        self.save_analysis_objects(output_dir, site_tag)

        # 2. Run through events_shaking_a_site
        events_filename = events_shaking_a_site(output_dir, site_tag, site_lat,
                                                site_lon, period, soil_amp)

        # 3. Read in generated CSV to a dict
        events_attributes = {
            'ground_motion': float,
            'ground_motion_model': str,
            'trace_start_lat': float,
            'trace_start_lon': float,
            'trace_end_lat': float,
            'trace_end_lon': float,
            'rupture_centroid_lat': float,
            'rupture_centroid_lon': float,
            'depth': float,
            'azimuth': float,
            'dip': float,
            'Mw': float,
            'length': float,
            'width': float,
            'activity': float,
            'Rjb': float,
            'Rrup': float,
            'site_lat': float,
            'site_lon': float
        }
        events_arrays = csv_to_arrays(events_filename, **events_attributes)

        # 4. Expected results
        expected_ground_motion = asarray([1, 4, 7, 10, 13])  # period == 1.0
        expected_ground_motion_model = asarray([
            'Allen', 'Toro_1997_midcontinent', 'Sadigh_97',
            'Youngs_97_interface', 'Youngs_97_intraslab'
        ])

        # Manually calculated
        expected_trace_start_lat = -29.98204065 * ones(5)
        expected_trace_start_lon = 149.25728051 * ones(5)
        expected_trace_end_lat = -29.97304727 * ones(5)
        expected_trace_end_lon = 149.25764315 * ones(5)

        # Input values
        expected_rupture_centroid_lat = -30 * ones(5)
        expected_rupture_centroid_lon = 150 * ones(5)
        expected_length = 1.0 * ones(5)
        expected_azimuth = 2.0 * ones(5)
        expected_width = 3.0 * ones(5)
        expected_dip = 4.0 * ones(5)
        expected_depth = 5.0 * ones(5)
        expected_Mw = 6.0 * ones(5)

        # Same as atten_model_weights
        expected_activity = 0.2 * ones(5)

        # Manually calculated
        expected_Rjb = 110.59526125 * ones(5)

        # wrong value
        #expected_Rrup = 131.86940242*ones(5)

        # Different to input site lat/lon
        expected_site_lat = -31 * ones(5)
        expected_site_lon = 150 * ones(5)

        # 5. Compare results
        self.assert_(
            allclose(expected_ground_motion, events_arrays['ground_motion']))

        self.assert_(
            string_array_equal(expected_ground_motion_model,
                               events_arrays['ground_motion_model']))

        self.assert_(
            allclose(expected_trace_start_lat,
                     events_arrays['trace_start_lat']))

        self.assert_(
            allclose(expected_trace_start_lon,
                     events_arrays['trace_start_lon']))

        self.assert_(
            allclose(expected_trace_end_lat, events_arrays['trace_end_lat']))

        self.assert_(
            allclose(expected_trace_end_lon, events_arrays['trace_end_lon']))

        self.assert_(
            allclose(expected_rupture_centroid_lat,
                     events_arrays['rupture_centroid_lat']))

        self.assert_(
            allclose(expected_rupture_centroid_lon,
                     events_arrays['rupture_centroid_lon']))

        self.assert_(allclose(expected_length, events_arrays['length']))

        self.assert_(allclose(expected_azimuth, events_arrays['azimuth']))

        self.assert_(allclose(expected_width, events_arrays['width']))

        self.assert_(allclose(expected_dip, events_arrays['dip']))

        self.assert_(allclose(expected_depth, events_arrays['depth']))

        self.assert_(allclose(expected_Mw, events_arrays['Mw']))

        self.assert_(allclose(expected_Rjb, events_arrays['Rjb']))

        #self.assert_(allclose(expected_Rrup,
        #                      events_arrays['Rrup']))

        self.assert_(allclose(expected_activity, events_arrays['activity']))

        self.assert_(allclose(expected_site_lat, events_arrays['site_lat']))

        self.assert_(allclose(expected_site_lon, events_arrays['site_lon']))
Example #16
    def from_csv(cls,
                 sites_filename,
                 building_classification_tag,
                 damage_extent_tag,
                 default_input_dir,
                 input_dir=None,
                 eqrm_dir=None,
                 buildings_usage_classification='HAZUS',
                 use_refined_btypes=True,
                 force_btype_flag=False,
                 determ_btype=None,
                 determ_buse=None,
                 loss_aus_contents=0):
        """Read structures data from a file.
        Extract structure parameters from building_parameters_table.

        parameters
          sites_filename is actually a file handle

            - some parameters depend on structure classification (hazus or
              refined hazus, depending on use_refined_btypes)

            - some parameters depend on usage (fcb or hazus, depending on
              use_fcb_usage)

            - force_btype_flag will force all buildings to be of type
              determ_btype, and usage determ_buse.

              See page 11 of the tech manual to understand the flags

              refined_btypes = Edwards building classification
        """

        if eqrm_dir is None:
            eqrm_dir = determine_eqrm_path(__file__)

        if force_btype_flag:
            raise NotImplementedError
        sites_dict = csv_to_arrays(sites_filename, **attribute_conversions)

        latitude = sites_dict.pop("LATITUDE")
        longitude = sites_dict.pop("LONGITUDE")

        # create copy of attributes
        attributes = copy.copy(sites_dict)

        # Do we use refined_btypes or hazus btypes?
        if not use_refined_btypes:
            attributes['STRUCTURE_CLASSIFICATION'] = \
                attributes['HAZUS_STRUCTURE_CLASSIFICATION']

        # building_parameters_table has a lot of info read in from
        # various files in resources data.
        # It is a dict, where the key signifies what the info is about, eg
        # height, ductility.  The index of the data represents the structure
        # classification, which is stored in
        # building_parameters_table['structure_classification']
        building_parameters_table = \
            building_params_from_csv(building_classification_tag,
                                     damage_extent_tag,
                                     default_input_dir=default_input_dir,
                                     input_dir=input_dir)

        # get index that maps attributes ->
        #    building_parameters_table (joined by 'structure_classification')
        structure_classification = attributes['STRUCTURE_CLASSIFICATION']
        building_parameter_index = \
            get_index(
                building_parameters_table['structure_classification'],
                structure_classification)

        # Now extract building_parameters from the table,
        # using building_parameter_index
        building_parameters = {}
        for key in building_parameters_table.keys():
            building_parameters[key] = \
                building_parameters_table[key][building_parameter_index]

        # Get non-structural drift thresholds (depending on whether or not
        # they are residential)
        # extract usage dependent parameters
        if buildings_usage_classification == 'HAZUS':
            usages = attributes['HAZUS_USAGE']
            is_residential = (array([(usage[0:4] in ['RES1', 'RES2', 'RES3'])
                                     for usage in usages]))
        elif buildings_usage_classification == 'FCB':
            usages = attributes['FCB_USAGE']
            is_residential = ((usages <= 113) + (usages == 131))
        else:
            msg = ('b_usage_type_flat = ' + str(buildings_usage_classification)
                   + " not 'FCB' or 'HAZUS'")
            raise ValueError(msg)

        is_residential.shape = (-1, 1)  # reshape so it can be broadcast
        nr = building_parameters['non_residential_drift_threshold']
        r = building_parameters['residential_drift_threshold']
        drift_threshold = r * is_residential + nr * (1 - is_residential)
        building_parameters['drift_threshold'] = drift_threshold

        is_residential.shape = (-1,)
        if loss_aus_contents == 1:
            attributes['CONTENTS_COST_DENSITY'] *= \
                (is_residential * 0.6 + (is_residential - 1)
                 * 1.0)  # reduce contents

        rcp = build_replacement_ratios(usages, buildings_usage_classification)
        building_parameters['structure_ratio'] = rcp['structural']
        building_parameters['nsd_d_ratio'] = \
            rcp['nonstructural drift sensitive']
        building_parameters['nsd_a_ratio'] = \
            rcp['nonstructural acceleration sensitive']

        # create structures:
        return cls(latitude, longitude, building_parameters, **attributes)
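The reshape of is_residential to (-1, 1) lets one flag per building choose, by broadcasting, between the residential and non-residential drift thresholds for every damage state at once. A minimal sketch with invented numbers:

import numpy

is_residential = numpy.array([1, 0, 1])        # one flag per building
is_residential.shape = (-1, 1)                 # column vector, broadcasts over threshold columns
r = numpy.array([[0.1, 0.2], [0.1, 0.2], [0.1, 0.2]])    # residential thresholds per damage state
nr = numpy.array([[0.3, 0.4], [0.3, 0.4], [0.3, 0.4]])   # non-residential thresholds

drift_threshold = r * is_residential + nr * (1 - is_residential)
# rows 0 and 2 take the residential values, row 1 the non-residential ones:
# [[0.1, 0.2], [0.3, 0.4], [0.1, 0.2]]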
Example #18
def building_params_from_csv(building_classification_tag,
                             damage_extent_tag,
                             default_input_dir,
                             input_dir=None):
    """create building parameters dictionary
    This loads the building_parameters_workshop file, as well
    as other building related files that are hard-coded in this function.

    Args:
      csv_name: The name of the parameter file with the _params.csv
      part removed, eg building_parameters_workshop_3
      main_dir: the python_eqrm directory

    Returns:
      A dict with the parameter info.
    """

    # Get the data out of _params.csv
    attribute_conversions = {}
    attribute_conversions['structure_class'] = str
    attribute_conversions['structure_classification'] = str
    for name in [
            'design_strength', 'height', 'natural_elastic_period',
            'fraction_in_first_mode', 'height_to_displacement',
            'yield_to_design', 'ultimate_to_yield', 'ductility', 'damping_s',
            'damping_m', 'damping_l', 'damping_Be', 'structural_damage_slight',
            'structural_damage_moderate', 'structural_damage_extreme',
            'structural_damage_complete'
    ]:
        attribute_conversions[name] = float
    fid = get_local_or_default(
        'building_parameters%s.csv' % building_classification_tag,
        default_input_dir, input_dir)

    #file_location = join(default_input_dir, csv_name+'_params.csv')
    building_parameters = csv_to_arrays(fid, **attribute_conversions)

    # Get the data out of _non_structural_damage_params.csv
    attribute_conversions = {}
    for name in [
            'non_residential_drift_threshold', 'residential_drift_threshold',
            'acceleration_threshold'
    ]:
        attribute_conversions[name] = float
    fid = get_local_or_default('damage_extent%s.csv' % damage_extent_tag,
                               default_input_dir, input_dir)
    building_nsd_parameters = csv_to_arrays(fid, **attribute_conversions)

    for name in [
            'non_residential_drift_threshold', 'residential_drift_threshold',
            'acceleration_threshold'
    ]:
        building_parameters[name] = building_nsd_parameters[name][newaxis, :]

    # transform the data:
    #     turn height from feet to mm
    #     multiply drift damage by height
    # return building_parameters
    cvt_in2mm = 25.40
    # Conversion for structural damage
    # building height: convert feet to inches to mm
    building_parameters['height'] *= 12 * cvt_in2mm
    height_to_displacement = building_parameters['height_to_displacement']
    height = building_parameters['height']

    # setup damage state median thresholds (also feet -> mm)
    structural_damage_slight = building_parameters.pop(
        'structural_damage_slight')
    structural_damage_moderate = building_parameters.pop(
        'structural_damage_moderate')
    structural_damage_extreme = building_parameters.pop(
        'structural_damage_extreme')
    structural_damage_complete = building_parameters.pop(
        'structural_damage_complete')
    structural_damage_threshold = asarray(
        (structural_damage_slight, structural_damage_moderate,
         structural_damage_extreme, structural_damage_complete))
    structural_damage_threshold = structural_damage_threshold.swapaxes(0, 1)

    structural_damage_threshold = structural_damage_threshold * (
        (height_to_displacement * height)[:, newaxis])
    building_parameters[
        'structural_damage_threshold'] = structural_damage_threshold

    # setup damage state median thresholds (also feet -> inches)
    non_residential_drift_threshold = building_parameters[
        'non_residential_drift_threshold']
    non_residential_drift_threshold = non_residential_drift_threshold * (
        (height_to_displacement * height)[:, newaxis])

    building_parameters[
        'non_residential_drift_threshold'] = non_residential_drift_threshold

    residential_drift_threshold = building_parameters[
        'residential_drift_threshold']
    residential_drift_threshold = residential_drift_threshold * (
        (height_to_displacement * height)[:, newaxis])
    building_parameters[
        'residential_drift_threshold'] = residential_drift_threshold

    # expand acceleration_threshold to be the same size as the rest.
    acceleration_threshold = building_parameters['acceleration_threshold']
    acceleration_threshold = acceleration_threshold + 0 * height[:, newaxis]
    building_parameters['acceleration_threshold'] = acceleration_threshold

    return building_parameters
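The repeated multiplication by (height_to_displacement * height)[:, newaxis] scales each building class's drift-ratio thresholds by that class's effective displacement height, broadcasting a per-class column over the damage-state columns. A minimal sketch with invented values:

import numpy
from numpy import newaxis

height = numpy.array([3000.0, 6000.0])                  # mm, one entry per building class
height_to_displacement = numpy.array([0.75, 0.6])
drift_ratios = numpy.array([[0.004, 0.010],             # slight, moderate (illustrative)
                            [0.003, 0.008]])

thresholds = drift_ratios * (height_to_displacement * height)[:, newaxis]
# class 0: 0.75 * 3000 = 2250 mm -> [9.0, 22.5]
# class 1: 0.60 * 6000 = 3600 mm -> [10.8, 28.8]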