def get_last_calc_id(username=None):
    """
    :param username: if given, restrict to it
    :returns: the last calculation in the database or the datastore
    """
    if not config.dbserver.multi_user:
        # single-user mode: the local datastore is the source of truth
        return datastore.get_last_calc_id()
    # multi-user mode: ask the database server; 'get_job' may return None,
    # in which case fall back to 0
    return getattr(dbcmd('get_job', -1, username), 'id', 0)
def init(calc_id='nojob', level=logging.INFO):
    """
    1. initialize the root logger (if not already initialized)
    2. set the format of the root handlers (if any)
    3. return a new calculation ID candidate if calc_id is 'job' or 'nojob'
       (with 'nojob' the calculation ID is not stored in the database)

    :param calc_id: 'job', 'nojob', or an explicit integer calculation ID
    :param level: logging level used the first time the root logger is set up
    :returns: the calculation ID (an integer)
    """
    # NOTE: the original docstring claimed a new ID was produced "if calc_id
    # is None", but None is not handled anywhere below; the accepted values
    # are 'job', 'nojob' and plain integers
    if not logging.root.handlers:  # first time
        logging.basicConfig(level=level)
    if calc_id == 'job':  # produce a calc_id by creating a job in the db
        calc_id = dbcmd('create_job', datastore.get_datadir())
    elif calc_id == 'nojob':  # produce a calc_id without creating a job
        calc_id = datastore.get_last_calc_id() + 1
    else:
        assert isinstance(calc_id, int), calc_id
    # embed the calculation ID in every log line
    fmt = '[%(asctime)s #{} %(levelname)s] %(message)s'.format(calc_id)
    for handler in logging.root.handlers:
        handler.setFormatter(logging.Formatter(fmt))
    return calc_id
def init(calc_id='nojob', level=logging.INFO):
    """
    1. initialize the root logger (if not already initialized)
    2. set the format of the root handlers (if any)
    3. return a new calculation ID candidate if calc_id is 'job' or 'nojob'
       (with 'nojob' the calculation ID is not stored in the database)
    """
    if not logging.root.handlers:
        # root logger never configured in this process: do it once
        logging.basicConfig(level=level)
    if calc_id == 'job':
        # register a new job in the database and use its ID
        calc_id = dbcmd('create_job', datastore.get_datadir())
    elif calc_id == 'nojob':
        # next free ID according to the datastore; no database row is created
        calc_id = datastore.get_last_calc_id() + 1
    else:
        assert isinstance(calc_id, int), calc_id
    # tag every log record with the calculation ID
    fmt = '[%%(asctime)s #%s %%(levelname)s] %%(message)s' % calc_id
    formatter = logging.Formatter(fmt)
    for handler in logging.root.handlers:
        handler.setFormatter(formatter)
    return calc_id
def init(calc_id='nojob', level=logging.INFO):
    """
    1. initialize the root logger (if not already initialized)
    2. set the format of the root handlers (if any)
    3. return a new calculation ID candidate if calc_id is 'job' or 'nojob'
       (with 'nojob' the calculation ID is not stored in the database)
    """
    if not logging.root.handlers:
        # root logger never configured in this process: do it once
        logging.basicConfig(level=level)
    if calc_id == 'job':
        # allocate an ID by registering a new job in the database
        calc_id = dbcmd('create_job', datastore.get_datadir())
    elif calc_id == 'nojob':
        # next free ID according to the datastore; no database row is created
        calc_id = datastore.get_last_calc_id() + 1
    else:
        # an explicit ID was passed: refuse to clobber an existing datastore
        calc_id = int(calc_id)
        path = os.path.join(datastore.get_datadir(),
                            'calc_%d.hdf5' % calc_id)
        if os.path.exists(path):
            raise OSError('%s already exists' % path)
    # tag every log record with the calculation ID; the Formatter can be
    # shared, so build it once instead of once per handler
    fmt = '[%(asctime)s #{} %(levelname)s] %(message)s'.format(calc_id)
    formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
    for handler in logging.root.handlers:
        handler.setFormatter(formatter)
    return calc_id
def calculate_consequences(job_id='-1'):
    """
    Compute Hazus-style consequence estimates (repair/recovery/interruption
    times, collapse ratios, casualties by severity, displaced occupants and
    debris weights) for every asset of a damage calculation, writing one CSV
    file per realization (``consequences-rlz-XXX_<calc_id>.csv``) in the
    current directory.

    :param job_id: calculation ID as a string; '-1' (the default) means
        the last calculation found in the datastore
    :returns: None (all output goes to the CSV files)
    """
    # Resolve the calculation ID and open its datastore
    calc_id = datastore.get_last_calc_id() if job_id == '-1' else int(job_id)
    dstore = datastore.read(calc_id)
    lt = 0  # structural damage
    stat = 0  # damage state mean values
    num_rlzs = len(dstore["weights"])
    assetcol = dstore['assetcol']
    taxonomies = assetcol.tagcol.taxonomy
    # Read the asset damage table from the calculation datastore
    calculation_mode = dstore['oqparam'].calculation_mode
    if calculation_mode == 'scenario_damage':
        damages = dstore['damages-rlzs']
    elif calculation_mode == 'classical_damage':
        damages = dstore['damages-stats']
    else:
        # unsupported modes are reported and skipped, not raised
        print("Consequence calculations not supported for ",
              calculation_mode)
        return
    # Read the various consequences tables from the spreadsheet
    # NOTE(review): `read_params` and `xlsx` are not defined in this
    # function -- presumably module-level globals holding the sheet-reader
    # callbacks and the Hazus workbook; confirm against the rest of the file
    square_footage_df = read_params["Square Footage"](xlsx)
    # NOTE(review): the four repair-ratio tables and the outdoor casualty
    # rates are loaded but never used below -- possibly kept for future use
    repair_ratio_str_df = read_params["Structural Repair Ratios"](xlsx)
    repair_ratio_nsa_df = read_params["NonstrAccel Repair Ratios"](xlsx)
    repair_ratio_nsd_df = read_params["NonstrDrift Repair Ratios"](xlsx)
    repair_ratio_con_df = read_params["Contents Damage Ratios"](xlsx)
    collapse_rate_df = read_params["Collapse Rates"](xlsx)
    casualty_rate_in_df = read_params["Indoor Casualty Rates"](xlsx)
    casualty_rate_out_df = read_params["Outdoor Casualty Rates"](xlsx)
    repair_time_df = read_params["Building Repair Time"](xlsx)
    recovery_time_df = read_params["Building Recovery Time"](xlsx)
    interruption_time_df = read_params["Interruption Time Multipliers"](xlsx)
    debris_df = read_params["Debris"](xlsx)
    unit_weight_df = debris_df["Unit Weight (tons per 1,000 sqft)"]
    debris_brick_wood_pct_df = debris_df[
        "Brick, Wood, and Other Debris Generated (in Percentage of Weight)"]
    debris_concrete_steel_pct_df = debris_df[
        "Reinforced Concrete and Wrecked Steel Generated (in Percentage of Weight)"]
    # Initialize lists / dicts to store the asset level casualty estimates
    severity_levels = ["Severity 1", "Severity 2", "Severity 3", "Severity 4"]
    casualties_day = {
        "Severity 1": 0,
        "Severity 2": 0,
        "Severity 3": 0,
        "Severity 4": 0
    }
    casualties_night = {
        "Severity 1": 0,
        "Severity 2": 0,
        "Severity 3": 0,
        "Severity 4": 0
    }
    casualties_transit = {
        "Severity 1": 0,
        "Severity 2": 0,
        "Severity 3": 0,
        "Severity 4": 0
    }
    # One output CSV per hazard realization
    for rlzi in range(num_rlzs):
        print("Processing realization {} of {}".format(rlzi + 1, num_rlzs))
        filename = "consequences-rlz-" + str(rlzi).zfill(3) + "_" + str(
            calc_id) + ".csv"
        with open(filename, 'w') as f:
            writer = csv.writer(f)
            # Write the header row to the csv file
            writer.writerow([
                "asset_ref", "number_of_buildings", "value_structural",
                "value_nonstructural", "value_contents", "occupants_day",
                "occupants_night", "occupants_transit", "collapse_ratio",
                "mean_repair_time", "mean_recovery_time",
                "mean_interruption_time", "casualties_day_severity_1",
                "casualties_day_severity_2", "casualties_day_severity_3",
                "casualties_day_severity_4", "casualties_night_severity_1",
                "casualties_night_severity_2", "casualties_night_severity_3",
                "casualties_night_severity_4",
                "casualties_transit_severity_1",
                "casualties_transit_severity_2",
                "casualties_transit_severity_3",
                "casualties_transit_severity_4", "sc_Displ3", "sc_Displ30",
                "sc_Displ90", "sc_Displ180", "sc_Displ360", "sc_BusDispl30",
                "sc_BusDispl90", "sc_BusDispl180", "sc_BusDispl360",
                "debris_brick_wood_tons", "debris_concrete_steel_tons"
            ])
            for asset in tqdm(assetcol):
                asset_ref = asset['id'].decode()
                # taxonomy strings have the form "occupancy-type-codelevel"
                # (the split below requires exactly two dashes)
                asset_occ, asset_typ, code_level = taxonomies[
                    asset['taxonomy']].split('-')
                if calculation_mode == 'scenario_damage':
                    # Note: engine versions <3.10 require an additional 'stat' variable
                    # as the previous output includes mean and stddev fields
                    # asset_damages = damages[asset['ordinal'], rlzi, lt, stat]
                    asset_damages = damages[asset['ordinal'], rlzi, lt]
                elif calculation_mode == 'classical_damage':
                    asset_damages = damages[asset['ordinal'], stat, rlzi]
                # Clip negative values, then normalize by the building count
                # to get per-building damage state ratios
                asset_damages = [max(0, d) for d in asset_damages]
                asset_damage_ratios = [
                    d / asset['number'] for d in asset_damages
                ]
                # Repair and recovery time estimates
                # Hazus tables 15.9, 15.10, 15.11
                repair_time = np.dot(asset_damage_ratios,
                                     repair_time_df.loc[asset_occ])
                recovery_time = np.dot(asset_damage_ratios,
                                       recovery_time_df.loc[asset_occ])
                # interruption = recovery time scaled by occupancy multipliers
                interruption_time = np.dot(
                    asset_damage_ratios,
                    recovery_time_df.loc[asset_occ] *
                    interruption_time_df.loc[asset_occ])
                # Debris weight estimates
                # Hazus tables 12.1, 12.2, 12.3
                unit_weight = unit_weight_df.loc[asset_typ]
                weight_brick_wood = (
                    unit_weight["Brick, Wood and Other"] *
                    square_footage_df.loc[asset_occ].values[0] / 1000 *
                    asset['number'])
                weight_concrete_steel = (
                    unit_weight["Reinforced Concrete and Steel"] *
                    square_footage_df.loc[asset_occ].values[0] / 1000 *
                    asset['number'])
                debris_brick_wood_pct = debris_brick_wood_pct_df.loc[asset_typ]
                debris_concrete_steel_pct = debris_concrete_steel_pct_df.loc[
                    asset_typ]
                # Tons of debris = total weight x damage-state percentages
                debris_brick_wood_str = weight_brick_wood[
                    "Structural"] * np.dot(
                        asset_damage_ratios,
                        debris_brick_wood_pct["Structural Damage State"] /
                        100)
                debris_brick_wood_nst = weight_brick_wood[
                    "Nonstructural"] * np.dot(
                        asset_damage_ratios,
                        debris_brick_wood_pct["Nonstructural Damage State"] /
                        100)
                debris_concrete_steel_str = weight_concrete_steel[
                    "Structural"] * np.dot(
                        asset_damage_ratios,
                        debris_concrete_steel_pct["Structural Damage State"] /
                        100)
                debris_concrete_steel_nst = weight_concrete_steel[
                    "Nonstructural"] * np.dot(
                        asset_damage_ratios,
                        debris_concrete_steel_pct[
                            "Nonstructural Damage State"] / 100)
                debris_brick_wood = debris_brick_wood_str + debris_brick_wood_nst
                debris_concrete_steel = debris_concrete_steel_str + debris_concrete_steel_nst
                # Estimate number of displaced occupants based on heuristics provided by Murray
                # NOTE(review): a recovery_time of exactly 30 falls in
                # neither sc_Displ3 (3 < t < 30) nor sc_Displ30 (t > 30);
                # confirm whether the boundary is intentional
                sc_Displ3 = asset[
                    "occupants_night"] if recovery_time > 3 and recovery_time < 30 else 0
                sc_Displ30 = asset[
                    "occupants_night"] if recovery_time > 30 else 0
                sc_Displ90 = asset[
                    "occupants_night"] if recovery_time > 90 else 0
                sc_Displ180 = asset[
                    "occupants_night"] if recovery_time > 180 else 0
                sc_Displ360 = asset[
                    "occupants_night"] if recovery_time > 360 else 0
                sc_BusDispl30 = asset[
                    "occupants_day"] if recovery_time > 30 else 0
                sc_BusDispl90 = asset[
                    "occupants_day"] if recovery_time > 90 else 0
                sc_BusDispl180 = asset[
                    "occupants_day"] if recovery_time > 180 else 0
                sc_BusDispl360 = asset[
                    "occupants_day"] if recovery_time > 360 else 0
                # Split complete damage state into collapse and non-collapse
                # This distinction is then used for the casualty estimates
                # Collapse rates given complete damage are from Hazus table 13.8
                collapse_rate = collapse_rate_df.loc[asset_typ].values[0]
                # index 0 of asset_damage_ratios (the no-damage state) is
                # deliberately left out of this dict
                dmg = {
                    "Slight Damage": asset_damage_ratios[1],
                    "Moderate Damage": asset_damage_ratios[2],
                    "Extensive Damage": asset_damage_ratios[3],
                    "Complete Damage (No Collapse)":
                    asset_damage_ratios[4] * (1 - collapse_rate),
                    "Complete Damage (With Collapse)":
                    asset_damage_ratios[4] * collapse_rate
                }
                collapse_ratio = dmg["Complete Damage (With Collapse)"]
                # scientific notation for tiny ratios; literal '0' when zero
                collapse_ratio_str = "{:.2e}".format(
                    collapse_ratio) if collapse_ratio else '0'
                # Estimate casualties (day/night/transit) at four severity levels
                # Hazus tables 13.3, 13.4, 13.5, 13.6, 13.7
                for severity_level in severity_levels:
                    # NOTE(review): the [:, severity_level] lookup assumes a
                    # MultiIndex on casualty_rate_in_df -- verify its shape
                    casualty_ratio = np.dot(
                        list(dmg.values()),
                        casualty_rate_in_df.loc[asset_typ][:, severity_level])
                    casualties_day[severity_level] = (casualty_ratio *
                                                      asset["occupants_day"])
                    casualties_night[severity_level] = (
                        casualty_ratio * asset["occupants_night"])
                    casualties_transit[severity_level] = (
                        casualty_ratio * asset["occupants_transit"])
                # Write all consequence estimates for this asset to the csv file
                writer.writerow([
                    asset_ref, "{0:,.1f}".format(asset['number']),
                    "{0:,.1f}".format(asset["value-structural"]),
                    "{0:,.1f}".format(asset["value-nonstructural"]),
                    "{0:,.1f}".format(asset["value-contents"]),
                    "{0:,.1f}".format(asset["occupants_day"]),
                    "{0:,.1f}".format(asset["occupants_night"]),
                    "{0:,.1f}".format(asset["occupants_transit"]),
                    collapse_ratio_str,
                    "{0:,.1f}".format(repair_time),
                    "{0:,.1f}".format(recovery_time),
                    "{0:,.1f}".format(interruption_time),
                    "{0:,.2f}".format(casualties_day["Severity 1"]),
                    "{0:,.2f}".format(casualties_day["Severity 2"]),
                    "{0:,.2f}".format(casualties_day["Severity 3"]),
                    "{0:,.2f}".format(casualties_day["Severity 4"]),
                    "{0:,.2f}".format(casualties_night["Severity 1"]),
                    "{0:,.2f}".format(casualties_night["Severity 2"]),
                    "{0:,.2f}".format(casualties_night["Severity 3"]),
                    "{0:,.2f}".format(casualties_night["Severity 4"]),
                    "{0:,.2f}".format(casualties_transit["Severity 1"]),
                    "{0:,.2f}".format(casualties_transit["Severity 2"]),
                    "{0:,.2f}".format(casualties_transit["Severity 3"]),
                    "{0:,.2f}".format(casualties_transit["Severity 4"]),
                    "{0:,.1f}".format(sc_Displ3),
                    "{0:,.1f}".format(sc_Displ30),
                    "{0:,.1f}".format(sc_Displ90),
                    "{0:,.1f}".format(sc_Displ180),
                    "{0:,.1f}".format(sc_Displ360),
                    "{0:,.1f}".format(sc_BusDispl30),
                    "{0:,.1f}".format(sc_BusDispl90),
                    "{0:,.1f}".format(sc_BusDispl180),
                    "{0:,.1f}".format(sc_BusDispl360),
                    "{0:,.1f}".format(debris_brick_wood),
                    "{0:,.1f}".format(debris_concrete_steel),
                ])