def wagen1_plot(c: Config):
    """Plot of wagen1 compared to given specification."""
    plt.landscape()
    wheel_print = (0.31, 0.25)
    wheel_prints = []
    for w_i in range(len(truck1.axle_distances) + 1):
        if w_i in [1, 2]:
            wheel_prints.append([wheel_print, wheel_print])
        else:
            wheel_prints.append([wheel_print])
    plt.subplot(1, 2, 1)
    xlim, ylim = topview_vehicle(truck1, wheel_prints=wheel_prints)
    plt.title("Truck 1 specification")
    plt.xlabel("Width (m)")
    plt.ylabel("Length (m)")
    plt.subplot(1, 2, 2)
    topview_vehicle(truck1, xlim=xlim, ylim=ylim)
    plt.title("Truck 1 in simulation")
    plt.xlabel("Width (m)")
    plt.ylabel("Length (m)")
    plt.savefig(c.get_image_path("vehicles", "wagen-1", bridge=False) + ".pdf")
    plt.close()
def cracked_concrete_plots(c: Config):
    """Contour plots of cracked concrete scenarios."""
    response_type = ResponseType.YTranslation
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # Collect responses to normal traffic under the cracked scenario.
    response_array = responses_to_traffic_array(
        c=c,
        traffic_array=load_normal_traffic_array(c)[0],
        response_type=response_type,
        bridge_scenario=cracked_scenario,
        points=points,
        sim_runner=OSRunner,
    )
    for t in range(len(response_array)):
        top_view_bridge(c.bridge, abutments=True, piers=True)
        responses = Responses.from_responses(
            response_type=response_type,
            responses=[(response_array[t][p], point) for p, point in enumerate(points)],
        )
        plot_contour_deck(c=c, responses=responses, center_norm=True)
        plt.title("Cracked Concrete")
        plt.savefig(c.get_image_path("cracked-scenario", f"cracked-time-{t}"))
        plt.close()
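# A minimal, standalone sketch of the grid-building pattern used above (and in
# several functions below): a uniform grid of deck points from itertools.product
# over two np.linspace ranges. The deck bounds in the docstring example are
# made-up values; the real code takes them from 'c.bridge'.
import itertools

import numpy as np


def _deck_grid_xz(x_min, x_max, z_min, z_max, nx=10, nz=10):
    """Return (x, z) coordinates of an nx-by-nz grid over the deck.

    E.g. _deck_grid_xz(0, 100, -16, 16) returns 100 (x, z) pairs.
    """
    return [
        (x, z)
        for x, z in itertools.product(
            np.linspace(x_min, x_max, nx), np.linspace(z_min, z_max, nz)
        )
    ]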
def opensees_default(
    bridge: Callable[[], Bridge], os_exe: Optional[str] = None, **kwargs
) -> Config:
    """A Config using OpenSees for a given Bridge.

    Args:
        bridge: function to return a new Bridge.
        os_exe: absolute path to OpenSees binary. Optional, if not given this
            will look for OpenSees on the $PATH.
        kwargs: keyword arguments passed to the Config constructor.

    """
    return Config(
        bridge=bridge,
        sim_runner=os_runner(os_exe),
        vehicle_data_path=os.path.join(project_dir(), "data/traffic/traffic.csv"),
        vehicle_pdf=[
            (2.4, 5),
            (5.6, 45),
            (7.5, 30),
            (9, 15),
            (11.5, 4),
            (12.2, 0.5),
            (43, 0),
        ],
        vehicle_pdf_col="length",
        **kwargs,
    )
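# A minimal, standalone sketch of one way the (length, percent) pairs in
# 'vehicle_pdf' above could be sampled with NumPy. Interpreting each pair as
# (upper length bound in m, percentage of vehicles) is an assumption here; the
# real sampling is done by the traffic generation code, not by this helper.
import numpy as np


def _sample_vehicle_lengths(vehicle_pdf, size=10, seed=0):
    """Sample vehicle lengths (m) from assumed (upper bound, percent) bins.

    E.g. _sample_vehicle_lengths([(2.4, 5), (5.6, 45), (7.5, 50)], size=3).
    """
    rng = np.random.default_rng(seed)
    uppers = np.array([u for u, _ in vehicle_pdf], dtype=float)
    weights = np.array([w for _, w in vehicle_pdf], dtype=float)
    # Choose a bin per sample according to the weights, then a uniform length
    # within that bin.
    bins = rng.choice(len(uppers), size=size, p=weights / weights.sum())
    lowers = np.concatenate(([0.0], uppers[:-1]))
    return rng.uniform(lowers[bins], uppers[bins])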
def make_available_sensors_plot(
    c: Config, pier_radius: float, track_radius: float, edge_radius: float
):
    """Scatter plot of sensors used for classification."""
    top_view_bridge(c.bridge, abutments=True, piers=True, compass=False)
    plot_deck_sensors(
        c=c,
        without=without.points(
            c=c,
            pier_radius=pier_radius,
            track_radius=track_radius,
            edge_radius=edge_radius,
        ),
        label=True,
    )
    for l_i, load in enumerate([Point(x=21, z=-8.4), Point(x=33, z=-4)]):
        plt.scatter(
            [load.x],
            [load.z],
            color="red",
            marker="o",
            s=50,
            label="Sensor of interest" if l_i == 0 else None,
        )
    legend_marker_size(plt.legend(), 50)
    plt.title("Sensors available for classification on Bridge 705")
    plt.tight_layout()
    plt.savefig(c.get_image_path("sensors", "unavailable-sensors.pdf"))
    plt.close()
def make_boundary_plot(c: Config):
    """Top view of bridge with boundary conditions."""
    plt.landscape()
    top_view_bridge(c.bridge, abutments=True, piers=True, compass=False)
    plt.vlines(
        [0, c.bridge.length],
        c.bridge.z_min,
        c.bridge.z_max,
        lw=5,
        color="orange",
        label=" Y = 1, Z = 1",
    )
    for p_i, pier in enumerate(c.bridge.supports):
        z_min_top, z_max_top = pier.z_min_max_bottom()
        x_min, x_max = pier.x_min_max_top()
        x_center = x_min + ((x_max - x_min) / 2)
        plt.vlines(
            [x_center],
            z_min_top,
            z_max_top,
            lw=5,
            color="red" if (8 <= p_i <= 15) else "orange",
            label="X = 1, Y = 1, Z = 1" if p_i == 8 else None,
        )
    legend_marker_size(plt.legend(), 50)
    plt.title("Bridge 705 boundary conditions of nodal supports")
    plt.tight_layout()
    plt.savefig(c.get_image_path("sensors", "boundary.pdf"))
    plt.close()
def temperature_effect_date(c: Config, month: str, vert: bool):
    """Plot temperature, and its effect at a deck point, for the given month."""
    temp = __init__.load(name=month)
    point = Point(x=51, y=0, z=-8.4)
    plt.landscape()

    def plot_hours():
        if not vert:
            return
        label_set = False
        for dt in temp["datetime"]:
            if np.isclose(float(dt.hour + dt.minute), 0):
                label = None
                if not label_set:
                    label = "Time at vertical line = 00:00"
                    label_set = True
                plt.axvline(x=dt, linewidth=1, color="black", label=label)

    # Plot the temperature.
    plt.subplot(2, 1, 1)
    plot_hours()
    plt.scatter(
        temp["datetime"],
        temp["temp"],
        c=temp["missing"],
        cmap=mpl.cm.get_cmap("bwr"),
        s=1,
    )
    plt.ylabel("Temperature (°C)")
    plt.xlabel("Date")
    plt.gcf().autofmt_xdate()
    plt.title(f"Temperature in {str(month[0]).upper()}{month[1:]}")
    plt.legend()

    # Plot the effect at a point.
    response_type = ResponseType.YTranslation
    plt.subplot(2, 1, 2)
    plot_hours()
    effect = __init__.effect(
        c=c, response_type=response_type, points=[point], temps=temp["temp"]
    )[0]
    plt.scatter(
        temp["datetime"],
        effect * 1000,
        c=temp["missing"],
        cmap=mpl.cm.get_cmap("bwr"),
        s=1,
    )
    plt.ylabel(f"{response_type.name()} (mm)")
    plt.xlabel("Date")
    plt.gcf().autofmt_xdate()
    plt.title(f"{response_type.name()} to unit thermal loading in {month}")

    # Save.
    plt.tight_layout()
    plt.savefig(c.get_image_path("classify/temperature", f"{month}.png"))
    plt.savefig(c.get_image_path("classify/temperature", f"{month}.pdf"))
    plt.close()
def vehicle_pdf_groups(c: Config):
    """Return vehicle PDF groups, only ever calculated once."""
    if not hasattr(c, "_vehicle_pdf_groups"):
        start = timer()
        c._vehicle_pdf_groups = _vehicle_pdf_groups(
            c.vehicle_data,
            c.vehicle_pdf_col,
            list(map(lambda x: x[0], c.vehicle_pdf)),
        )
        print_s(f"Vehicle PDF groups loaded in {timer() - start}")
    return c._vehicle_pdf_groups
def pairwise_cluster(c: Config, load: bool):
    """Cluster pairwise maps from healthy and damaged scenarios."""
    features_path = c.get_data_path("features", "pairwise-cluster", bridge=False)
    if not load:
        normal_traffic_array, _ = load_normal_traffic_array(c=c, mins=24)
        normal_traffic_array = normal_traffic_array[
            int(len(normal_traffic_array) / 24) :
        ]
        response_type = ResponseType.YTranslation
        grid_points = [
            Point(x=x, y=0, z=-9.65)
            for x, _ in itertools.product(
                np.linspace(c.bridge.x_min, c.bridge.x_max, 50),
                # np.linspace(c.bridge.x_min, c.bridge.x_max, 4),
                [1],
            )
        ]
        # Collect a list of features per damage scenario.
        features = []
        for damage_scenario in healthy_and_cracked_scenarios[1:]:
            damage_c = damage_scenario.use(c)
            responses = responses_to_traffic_array(
                c=damage_c,
                traffic_array=normal_traffic_array,
                response_type=response_type,
                bridge_scenario=damage_scenario,
                points=grid_points,
                sim_runner=OSRunner,
            ).T
            ks_values = []
            for p0_i, point0 in enumerate(grid_points):
                print_i(f"Point {p0_i + 1} / {len(grid_points)}", end="\r")
                ks_values.append([])
                for p1_i, point1 in enumerate(grid_points):
                    ks = ks_no_outliers(responses[p0_i], responses[p1_i])
                    ks_values[-1].append(ks)
            features.append((ks_values, damage_scenario.name))
        # Save features to disk.
        features = np.array(features)
        np.save(features_path, features)
    features = np.load(features_path)
    # Reduce each pairwise map to a sum per sensor.
    for f_i, (feature, feature_name) in enumerate(features):
        features[f_i] = ([sum(sensor) for sensor in feature], feature_name)
    # Cluster each pairwise map.
    from sklearn.cluster import KMeans

    kmeans = KMeans(n_clusters=2)
    kmeans.fit(features)
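# A minimal, standalone sketch of the clustering step above, under assumed data
# shapes: each scenario's pairwise KS map is reduced to one feature vector of
# per-sensor row sums, and scikit-learn's KMeans splits the vectors into two
# clusters. This is not the project's code path, just the technique in isolation.
import numpy as np
from sklearn.cluster import KMeans


def _cluster_pairwise_sums(ks_maps, seed=0):
    """Cluster scenarios given their (n_sensors x n_sensors) KS maps.

    E.g. _cluster_pairwise_sums([np.eye(3), np.ones((3, 3))]) -> two labels.
    """
    # One feature vector per scenario: the sum of each sensor's row.
    X = np.array([np.asarray(m).sum(axis=1) for m in ks_maps])
    return KMeans(n_clusters=2, n_init=10, random_state=seed).fit(X).labels_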
def cover_photo(c: Config, x: float, deformation_amp: float):
    """3D deformed contour plots of the bridge, for the cover photo.

    TODO: SimParams takes any loads iterable, to be flattened.
    TODO: Wrap SimRunner into Config.
    TODO: Ignore response type in SimParams (fill in by load_sim_responses).
    """
    response_type = ResponseType.YTranslation
    sim_responses = load_fem_responses(
        c=c,
        sim_runner=OSRunner(c),
        response_type=response_type,
        sim_params=SimParams(
            response_types=[response_type],
            ploads=list(
                chain.from_iterable(
                    truck1.to_point_loads(
                        bridge=c.bridge,
                        time=truck1.time_at(x=x, bridge=c.bridge),
                    )
                )
            ),
        ),
    )
    shells = contour_responses_3d(c=c, sim_responses=sim_responses)
    for cmap in [
        parula_cmap,
        get_cmap("jet"),
        get_cmap("coolwarm"),
        get_cmap("viridis"),
    ]:
        contour_responses_3d(
            c=c,
            sim_responses=sim_responses,
            deformation_amp=deformation_amp,
            shells=shells,
            cmap=cmap,
        )
        plt.axis("off")
        plt.grid(False)
        plt.savefig(
            c.get_image_path(
                "cover-photo",
                f"cover-photo-deform-{deformation_amp}" f"-cmap-{cmap.name}.pdf",
            )
        )
        plt.close()
def traffic_response_plots(c: Config, times: int = 3):
    """Response to normal traffic per damage scenario at multiple time steps."""
    response_type = ResponseType.YTranslation
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # for damage_scenario in all_scenarios(c):
    for damage_scenario in [unit_temp_scenario]:
        response_array = responses_to_traffic_array(
            c=c,
            traffic_array=load_normal_traffic_array(c, mins=1)[0],
            response_type=response_type,
            bridge_scenario=damage_scenario,
            points=points,
            sim_runner=OSRunner,
        )
        print(response_array.shape)
        mean_response_array = np.mean(response_array, axis=0).T
        print(mean_response_array.shape)
        for t in range(times):
            time_index = -1 + abs(t)
            top_view_bridge(c.bridge, abutments=True, piers=True)
            responses = Responses.from_responses(
                response_type=response_type,
                responses=[
                    (response_array[time_index][p], point)
                    for p, point in enumerate(points)
                ],
            )
            plot_contour_deck(c=c, responses=responses, center_norm=True, levels=100)
            plt.title(damage_scenario.name)
            plt.savefig(
                c.get_image_path(
                    "contour-traffic-response",
                    f"{damage_scenario.name}-time={time_index}",
                )
            )
            plt.close()
def load_traffic(
    c: Config,
    traffic_scenario: TrafficScenario,
    max_time: float,
    add: Optional[str] = None,
):
    """Load traffic from disk, generated if necessary."""
    path = (
        c.get_data_path(
            "traffic",
            _traffic_name(c=c, traffic_scenario=traffic_scenario, max_time=max_time),
            acc=False,
        )
        + ".npy"
    )
    print(path)
    if add is not None:
        path += add
    # Create the traffic if it doesn't exist.
    if not os.path.exists(path + ".arr"):
        traffic_sequence = traffic_scenario.traffic_sequence(
            bridge=c.bridge, max_time=max_time
        )
        traffic = to_traffic(c=c, traffic_sequence=traffic_sequence, max_time=max_time)
        traffic_array = to_traffic_array(
            c=c, traffic_sequence=traffic_sequence, max_time=max_time
        )
        with open(path + ".seq", "wb") as f:
            dill.dump(traffic_sequence, f)
        with open(path + ".tra", "wb") as f:
            dill.dump(traffic, f)
        with open(path + ".arr", "wb") as f:
            np.save(f, traffic_array)
    with open(path + ".seq", "rb") as f:
        traffic_sequence = dill.load(f)
    with open(path + ".tra", "rb") as f:
        traffic = dill.load(f)
    with open(path + ".arr", "rb") as f:
        traffic_array = np.load(f)
    return traffic_sequence, traffic, traffic_array
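# A minimal, standalone sketch of the compute-or-load caching pattern used by
# 'load_traffic' above, reduced to a single NumPy array (the real function also
# pickles the TrafficSequence and Traffic objects with dill). The filename in
# the docstring example is hypothetical.
import os

import numpy as np


def _cached_array(path, compute):
    """Return the array at 'path', computing and saving it on a cache miss.

    E.g. _cached_array("traffic-example.npy", lambda: np.random.rand(1000)).
    """
    if not os.path.exists(path):
        with open(path, "wb") as f:
            np.save(f, compute())
    with open(path, "rb") as f:
        return np.load(f)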
def gradient_pier_displacement_plot(
    c: Config,
    pier_disp: PierSettlementScenario,
    response_type: ResponseType,
    title: str,
):
    """Contour plot of piers displaced in an increasing gradient."""
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # Create an empty traffic array and collect the responses.
    response_array = responses_to_traffic_array(
        c=c,
        traffic_array=np.zeros((1, len(c.bridge.wheel_tracks(c)) * c.il_num_loads)),
        response_type=response_type,
        bridge_scenario=pier_disp,
        points=points,
        fem_runner=OSRunner(c),
    )
    top_view_bridge(c.bridge, abutments=True, piers=True)
    responses = Responses.from_responses(
        response_type=response_type,
        responses=[(response_array[0][p], point) for p, point in enumerate(points)],
    )
    plot_contour_deck(c=c, responses=responses, center_norm=True)
    plt.title(title)
    plt.savefig(
        c.get_image_path("pier-scenarios", f"pier-displacement-{safe_str(title)}")
    )
    plt.close()
def number_of_uls_plot(c: Config):
    """Plot error as a function of number of unit load simulations."""
    if not c.shorten_paths:
        raise ValueError("This plot requires --shorten-paths true")
    response_type = ResponseType.YTranslation
    num_ulss = np.arange(100, 2000, 10)
    chosen_uls = 600
    point = Point(x=c.bridge.x_max - (c.bridge.length / 2), y=0, z=-8.4)
    wagen1_time = truck1.time_at(x=point.x, bridge=c.bridge)
    print_i(f"Wagen 1 time at x = {point.x:.3f} is t = {wagen1_time:.3f}")

    # Determine the reference value.
    truck_loads = flatten(
        truck1.to_point_load_pw(time=wagen1_time, bridge=c.bridge), PointLoad
    )
    print_i(f"Truck loads = {truck_loads}")
    sim_responses = load_fem_responses(
        c=c,
        response_type=response_type,
        sim_runner=OSRunner(c),
        sim_params=SimParams(ploads=truck_loads, response_types=[response_type]),
    )
    ref_value = sim_responses.at_deck(point, interp=True) * 1000
    print_i(f"Reference value = {ref_value}")

    # Collect the data.
    total_load = []
    num_loads = []
    responses = []
    for num_uls in num_ulss:
        c.il_num_loads = num_uls
        # Nested in here because it depends on the setting of 'il_num_loads'.
        truck_loads = flatten(
            truck1.to_wheel_track_loads(c=c, time=wagen1_time), PointLoad
        )
        num_loads.append(len(truck_loads))
        total_load.append(sum(map(lambda l: l.kn, truck_loads)))
        sim_responses = load_fem_responses(
            c=c,
            response_type=response_type,
            sim_runner=OSRunner(c),
            sim_params=SimParams(ploads=truck_loads, response_types=[response_type]),
        )
        responses.append(sim_responses.at_deck(point, interp=True) * 1000)

    # Plot the raw responses (commented out), then the error.
    plt.landscape()
    # plt.plot(num_ulss, responses)
    # plt.ylabel(f"{response_type.name().lower()} (mm)")
    plt.xlabel("ULS")
    error = np.abs(np.array(responses) - ref_value).flatten() * 100
    # ax2 = plt.twinx()
    plt.plot(num_ulss, error)
    plt.ylabel("Error (%)")
    plt.title(f"Error in {response_type.name()} to Truck 1 as a function of ULS")
    # Plot the chosen number of ULS.
    chosen_error = np.interp([chosen_uls], num_ulss, error)[0]
    plt.axhline(
        chosen_error,
        label=f"At {chosen_uls} ULS, error = {np.around(chosen_error, 2)} %",
        color="black",
    )
    plt.axhline(
        0, color="red", label="Response from direct simulation (no wheel tracks)"
    )
    plt.legend()
    plt.tight_layout()
    plt.savefig(c.get_image_path("paramselection", "uls.pdf"))
    plt.close()

    # Additional verification plots.
    plt.plot(num_ulss, total_load)
    plt.savefig(c.get_image_path("paramselection", "uls-verify-total-load.pdf"))
    plt.close()
    plt.plot(num_ulss, num_loads)
    plt.savefig(c.get_image_path("paramselection", "uls-verify-num-loads.pdf"))
    plt.close()
def experiment_noise(c: Config):
    """Plot displacement and strain noise from dynamic test 1."""
    ################
    # Displacement #
    ################
    plt.portrait()
    # Find points of each sensor.
    displa_labels = ["U13", "U26", "U29"]
    displa_points = []
    for displa_label in displa_labels:
        sensor_x, sensor_z = _displa_sensor_xz(displa_label)
        displa_points.append(Point(x=sensor_x, y=0, z=sensor_z))
    # For each sensor plot and estimate noise.
    side = 700
    for s_i, displa_label in enumerate(displa_labels):
        # First plot the signal, and the smoothed signal.
        plt.subplot(len(displa_points), 2, (s_i * 2) + 1)
        with open(f"validation/experiment/D1a-{displa_label}.txt") as f:
            data = list(map(float, f.readlines()))
        # Find the center of the plot: the minimum point in the first 15000 points.
        data_center = 0
        for i in range(15000):
            if data[i] < data[data_center]:
                data_center = i
        data = data[data_center - side : data_center + side]
        smooth = savgol_filter(data, 31, 3)
        plt.plot(data, linewidth=1)
        plt.plot(smooth, linewidth=1)
        plt.ylim(-0.8, 0.3)
        plt.title(f"{displa_label} in dynamic test")
        # Then plot the noisy signal minus the smoothed signal.
        plt.subplot(len(displa_points), 2, (s_i * 2) + 2)
        noise = data - smooth
        plt.plot(noise, label=f"σ = {np.around(np.std(noise), 4)}")
        plt.legend()
        plt.title(f"Noise from {displa_label}")
    plt.tight_layout()
    plt.savefig(c.get_image_path("params", "noise-displa.pdf"))
    plt.close()

    ##########
    # Strain #
    ##########
    plt.portrait()
    # Find points of each sensor.
    strain_labels = ["T1", "T10", "T11"]
    strain_points = []
    for strain_label in strain_labels:
        sensor_x, sensor_z = _strain_sensor_xz(strain_label)
        strain_points.append(Point(x=sensor_x, y=0, z=sensor_z))
    # For each sensor plot and estimate noise.
    side = 700
    xmin, xmax = np.inf, -np.inf
    for s_i, strain_label in enumerate(strain_labels):
        # First plot the signal, and the smoothed signal.
        plt.subplot(len(strain_points), 2, (s_i * 2) + 1)
        with open(f"validation/experiment/D1a-{strain_label}.txt") as f:
            data = list(map(float, f.readlines()))
        # Find the center of the plot: the minimum point in the first 15000 points.
        data_center = 0
        for i in range(15000):
            if data[i] < data[data_center]:
                data_center = i
        data = data[data_center - side : data_center + side]
        smooth = savgol_filter(data, 31, 3)
        plt.plot(data, linewidth=1)
        plt.plot(smooth, linewidth=1)
        plt.title(f"{strain_label} in dynamic test")
        # Then plot the noisy signal minus the smoothed signal.
        plt.subplot(len(strain_points), 2, (s_i * 2) + 2)
        noise = data - smooth
        plt.plot(noise, label=f"σ = {np.around(np.std(noise), 4)}")
        plt.legend()
        plt.title(f"Noise from {strain_label}")
    plt.tight_layout()
    plt.savefig(c.get_image_path("params", "noise-strain.pdf"))
    plt.close()
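# A minimal, standalone sketch of the noise estimate used above: smooth the
# signal with a Savitzky-Golay filter and take the standard deviation of the
# residual. The (31, 3) window length and polynomial order match the values
# used above; the docstring example is synthetic data, not the experiment.
import numpy as np
from scipy.signal import savgol_filter


def _noise_std(signal, window=31, polyorder=3):
    """Estimate noise as the residual std after Savitzky-Golay smoothing.

    E.g. for np.sin(t) plus Gaussian noise of sigma 0.05 this returns ~0.05.
    """
    signal = np.asarray(signal, dtype=float)
    smooth = savgol_filter(signal, window, polyorder)
    return float(np.std(signal - smooth))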
def point_load_response_plots(
    c: Config, x: float, z: float, kn: int = 1000, run: bool = False
):
    """Response to a point load per damage scenario."""
    response_types = [ResponseType.YTranslation, ResponseType.Strain]
    # scenarios = all_scenarios(c)
    damage_scenarios = [HealthyScenario(), transverse_crack()]
    # 30 x 100 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 30),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 100),
        )
    ]
    for response_type in response_types:
        all_responses = []
        for damage_scenario in damage_scenarios:
            sim_params = SimParams(
                response_types=[response_type],
                ploads=[
                    PointLoad(
                        x_frac=c.bridge.x_frac(x), z_frac=c.bridge.z_frac(z), kn=kn
                    )
                ],
            )
            use_c, sim_params = damage_scenario.use(c=c, sim_params=sim_params)
            all_responses.append(
                load_fem_responses(
                    c=use_c,
                    sim_params=sim_params,
                    response_type=response_type,
                    sim_runner=OSRunner(use_c),
                    run=run,
                ).resize()
            )
        amin, amax = np.inf, -np.inf
        for sim_responses in all_responses:
            responses = np.array(list(sim_responses.values()))
            amin = min(amin, min(responses))
            amax = max(amax, max(responses))
        for d, damage_scenario in enumerate(damage_scenarios):
            top_view_bridge(c.bridge, abutments=True, piers=True)
            plot_contour_deck(
                c=c,
                responses=all_responses[d],
                levels=100,
                norm=colors.Normalize(vmin=amin, vmax=amax),
                decimals=10,
            )
            plt.title(damage_scenario.name)
            plt.tight_layout()
            plt.savefig(
                c.get_image_path(
                    "contour/point-load",
                    safe_str(
                        f"x-{x:.2f}-z-{z:.2f}-kn-{kn}-{response_type.name()}-{damage_scenario.name}"
                    )
                    + ".pdf",
                )
            )
            plt.close()
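# A minimal, standalone sketch of the shared colour scale used above: compute
# the global min/max over all fields first, then pass the same
# 'colors.Normalize' to every contour plot so their colours are comparable.
# The docstring example uses random fields as placeholders for responses.
import numpy as np
from matplotlib import colors
from matplotlib import pyplot as plt


def _shared_scale_contours(fields):
    """Plot each 2D field with one colour scale shared across all of them.

    E.g. _shared_scale_contours([np.random.rand(10, 10), 2 * np.random.rand(10, 10)]).
    """
    amin = min(float(np.min(f)) for f in fields)
    amax = max(float(np.max(f)) for f in fields)
    norm = colors.Normalize(vmin=amin, vmax=amax)
    for i, field in enumerate(fields):
        plt.subplot(1, len(fields), i + 1)
        plt.contourf(field, levels=20, norm=norm)
    plt.colorbar()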
def piers_displaced(c: Config):
    """Contour plots of pier displacement for the given pier indices."""
    pier_indices = [4, 5]
    response_types = [ResponseType.YTranslation, ResponseType.Strain]
    axis_values = pd.read_csv("validation/axis-screenshots/piers-min-max.csv")
    for r_i, response_type in enumerate(response_types):
        for p in pier_indices:
            # Run the simulation and collect responses.
            sim_responses = load_fem_responses(
                c=c,
                response_type=response_type,
                sim_runner=OSRunner(c),
                sim_params=SimParams(
                    displacement_ctrl=PierSettlement(
                        displacement=c.pd_unit_disp, pier=p
                    ),
                ),
            )
            # In the case of stress we map from kN/m2 to kN/mm2 (E-6) and then
            # divide by 1000, so (E-9).
            assert c.pd_unit_disp == 1
            if response_type == ResponseType.Strain:
                sim_responses.to_stress(c.bridge).map(lambda r: r * 1e-9)
            # Get min and max values for both Axis and OpenSees.
            rt_str = (
                "displa" if response_type == ResponseType.YTranslation else "stress"
            )
            row = axis_values[axis_values["name"] == f"{p}-{rt_str}"]
            dmin, dmax = float(row["dmin"]), float(row["dmax"])
            omin, omax = float(row["omin"]), float(row["omax"])
            amin, amax = max(dmin, omin), min(dmax, omax)
            levels = np.linspace(amin, amax, 16)
            # Plot and save the image. If plotting strains use Axis values for
            # colour normalization.
            # norm = None
            from plot import axis_cmap_r

            cmap = axis_cmap_r
            top_view_bridge(c.bridge, abutments=True, piers=True)
            plot_contour_deck(c=c, cmap=cmap, responses=sim_responses, levels=levels)
            plt.tight_layout()
            plt.title(
                f"{sim_responses.response_type.name()} from 1mm pier settlement with OpenSees"
            )
            plt.savefig(
                c.get_image_path(
                    "validation/pier-displacement",
                    safe_str(f"pier-{p}-{sim_responses.response_type.name()}") + ".pdf",
                )
            )
            plt.close()
            # First plot and clear, just to have the same colorbar.
            plot_contour_deck(c=c, responses=sim_responses, cmap=cmap, levels=levels)
            plt.cla()
            # Save the Axis plots.
            axis_img = mpimg.imread(f"validation/axis-screenshots/{p}-{rt_str}.png")
            top_view_bridge(c.bridge, abutments=True)
            plt.imshow(
                axis_img,
                extent=(
                    c.bridge.x_min,
                    c.bridge.x_max,
                    c.bridge.z_min,
                    c.bridge.z_max,
                ),
            )
            # Plot the load and min, max values.
            for point, leg_label, color in [
                ((0, 0), f"min = {np.around(dmin, 3)} {sim_responses.units}", "r"),
                ((0, 0), f"max = {np.around(dmax, 3)} {sim_responses.units}", "r"),
                (
                    (0, 0),
                    f"|min-max| = {np.around(abs(dmax - dmin), 3)} {sim_responses.units}",
                    "r",
                ),
            ]:
                plt.scatter(
                    [point[0]],
                    [point[1]],
                    label=leg_label,
                    marker="o",
                    color=color,
                    alpha=0,
                )
            if response_type == ResponseType.YTranslation:
                plt.legend()
            # Title and save.
            plt.title(f"{response_type.name()} from 1mm pier settlement with AxisVM")
            plt.xlabel("X position (m)")
            plt.ylabel("Z position (m)")
            plt.tight_layout()
            plt.savefig(
                c.get_image_path(
                    "validation/pier-displacement",
                    f"{p}-axis-{rt_str}.pdf",
                )
            )
            plt.close()
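# A minimal, standalone sketch of the screenshot overlay used above (and in the
# Diana comparison below): imshow's 'extent' maps the image corners to data
# coordinates, so geometry drawn on the same axes lines up with the screenshot.
# The deck bounds in the docstring example are placeholders.
from matplotlib import pyplot as plt


def _overlay_screenshot(img, x_min, x_max, z_min, z_max):
    """Draw 'img' (e.g. from mpimg.imread) in bridge deck coordinates.

    E.g. _overlay_screenshot(np.random.rand(100, 300), 0, 102, -16.6, 16.6).
    """
    plt.imshow(img, extent=(x_min, x_max, z_min, z_max))
    # Anything plotted now shares the screenshot's coordinate system.
    plt.plot([x_min, x_max], [0, 0], "k--", label="deck centreline")
    plt.legend()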
def pairwise_sensors(c: Config, dist_measure=ks_no_outliers):
    """Compare distribution of pairs of sensors under HealthyScenario."""
    normal_traffic_array, traffic_scenario = load_normal_traffic_array(c)
    response_type = ResponseType.YTranslation
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 50),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 4),
        )
    ]

    bridge_scenario = HealthyScenario()
    responses = responses_to_traffic_array(
        c=c,
        traffic_array=normal_traffic_array,
        response_type=response_type,
        bridge_scenario=bridge_scenario,
        points=points,
        sim_runner=OSRunner,
    ).T
    assert len(responses) == len(points)
    ks_values_healthy = []
    for p0, point0 in enumerate(points):
        print_i(f"Point {p0 + 1} / {len(points)}")
        ks_values_healthy.append([])
        for p1, point1 in enumerate(points):
            ks = dist_measure(responses[p0], responses[p1])
            ks_values_healthy[-1].append(ks)
    plt.landscape()
    plt.imshow(ks_values_healthy)
    plt.savefig(c.get_image_path("joint-clustering", "healthy-bridge"))
    plt.close()

    bridge_scenario = each_pier_scenarios(c)[0]
    responses = responses_to_traffic_array(
        c=c,
        traffic_array=normal_traffic_array,
        response_type=response_type,
        bridge_scenario=bridge_scenario,
        points=points,
        sim_runner=OSRunner,
    ).T
    assert len(responses) == len(points)
    ks_values_damage = []
    for p0, point0 in enumerate(points):
        print_i(f"Point {p0 + 1} / {len(points)}")
        ks_values_damage.append([])
        for p1, point1 in enumerate(points):
            ks = dist_measure(responses[p0], responses[p1])
            ks_values_damage[-1].append(ks)
    plt.imshow(ks_values_damage)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge"))
    plt.close()

    ks_values_comp = []
    for p0, point0 in enumerate(points):
        ks_values_comp.append([])
        for p1, point1 in enumerate(points):
            comp = abs(ks_values_healthy[p0][p1] - ks_values_damage[p0][p1])
            ks_values_comp[-1].append(comp)
    plt.landscape()
    plt.imshow(ks_values_comp)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge-comp"))
    plt.close()

    responses = Responses.from_responses(
        response_type=response_type,
        responses=[(sum(ks_values_comp[p]), point) for p, point in enumerate(points)],
    )
    top_view_bridge(c.bridge, abutments=True, piers=True)
    plot_contour_deck(c=c, responses=responses)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge-comp-contour"))
    plt.close()
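# A minimal, standalone sketch of the pairwise distance matrix built above,
# using SciPy's two-sample Kolmogorov-Smirnov statistic directly. The project
# code uses its own 'ks_no_outliers' wrapper (passed as 'dist_measure') instead
# of raw ks_2samp.
import numpy as np
from scipy.stats import ks_2samp


def _ks_matrix(series):
    """Return the (n x n) matrix of KS statistics between all pairs of series.

    E.g. _ks_matrix([a, b, c]) where a, b and c are 1D arrays of samples.
    """
    n = len(series)
    ks = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            ks[i][j] = ks_2samp(series[i], series[j]).statistic
    return ks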
def events(c: Config, x: float, z: float):
    """Plot events due to normal traffic."""
    point = Point(x=x, y=0, z=z)
    # 10 seconds of 'normal' traffic.
    max_time = 10
    traffic_scenario = normal_traffic(c=c, lam=5, min_d=2)
    # Create the 'TrafficSequence' and 'TrafficArray'.
    traffic_sequence = traffic_scenario.traffic_sequence(
        bridge=c.bridge, max_time=max_time
    )
    traffic_array = to_traffic_array(
        c=c, traffic_sequence=traffic_sequence, max_time=max_time
    )
    # Find when the simulation has warmed up, and when the 'TrafficArray' begins.
    warmed_up_at = traffic_sequence[0][0].time_left_bridge(c.bridge)
    traffic_array_starts = (int(warmed_up_at / c.sensor_hz) + 1) * c.sensor_hz
    print(f"warmed up at = {warmed_up_at}")
    print(f"traffic_array_starts = {traffic_array_starts}")
    traffic_array_ends = traffic_array_starts + (len(traffic_array) * c.sensor_hz)
    print(f"traffic_array_ends = {traffic_array_ends}")
    point_lane_ind = c.bridge.closest_lane(z)
    vehicles = list(set(ts[0] for ts in traffic_sequence))
    print(len(vehicles))
    print(vehicles[0])
    vehicles = sorted(
        set(ts[0] for ts in traffic_sequence if ts[0].lane == point_lane_ind),
        key=lambda v: -v.init_x_frac,
    )
    print(len(vehicles))
    print(vehicles[0])
    event_indices = []
    vehicle_times = [v.time_at(x=x - 2, bridge=c.bridge) for v in vehicles]
    for v, t in zip(vehicles, vehicle_times):
        print(f"Vehicle {v.init_x_frac} {v.mps} at time {t}")
        start_time = int(t / c.sensor_hz) * c.sensor_hz
        print(f"start_time = {start_time}")
        ta_start_time = np.around(start_time - traffic_array_starts, 8)
        print(f"ta start time = {ta_start_time}")
        ta_start_index = int(ta_start_time / c.sensor_hz)
        print(f"ta start index = {ta_start_index}")
        ta_end_index = ta_start_index + int(c.event_time_s / c.sensor_hz)
        print(f"ta end index = {ta_end_index}")
        if ta_start_index >= 0 and ta_end_index < len(traffic_array):
            event_indices.append((ta_start_index, ta_end_index))
    print(event_indices)
    responses = (
        responses_to_traffic_array(
            c=c,
            traffic_array=traffic_array,
            response_type=ResponseType.YTranslation,
            damage_scenario=healthy_scenario,
            points=[point],
            sim_runner=OSRunner(c),
        )
        * 1000
    )
    # responses = add_displa_noise(responses)
    print(responses.shape)
    plt.portrait()
    for event_ind, (event_start, event_end) in enumerate(event_indices):
        plt.subplot(len(event_indices), 1, event_ind + 1)
        plt.plot(responses[event_start : event_end + 1])
    plt.tight_layout()
    plt.savefig(c.get_image_path("classify/events", "events.pdf"))
    plt.close()
def load_ulm(
    c: Config,
    response_type: ResponseType,
    points: List[Point],
    sim_runner: FEMRunner,
):
    """Load the unit load matrix for the given points, generated if necessary."""
    wheel_zs = c.bridge.wheel_track_zs(c)
    filepath = c.get_data_path(
        "ulms",
        (
            ULResponses.id_str(
                c=c,
                response_type=response_type,
                sim_runner=sim_runner,
                wheel_zs=wheel_zs,
            )
            + str([str(point) for point in points])
        )
        + ".ulm",
    )
    filepath = shorten_path(c=c, bypass_config=True, filepath=filepath)
    if os.path.exists(filepath):
        with open(filepath, "rb") as f:
            return np.load(f)

    def ulm_partial(wheel_z):
        """Slice of the unit load matrix for one wheel track."""
        wheel_track = ULResponses.load_wheel_track(
            c=c,
            response_type=response_type,
            fem_runner=sim_runner,
            load_z_frac=c.bridge.z_frac(wheel_z),
            run_only=False,
        )
        partial = np.empty((c.il_num_loads, len(points)))
        i = 0
        for sim_responses in wheel_track:
            for j, point in enumerate(points):
                partial[i][j] = sim_responses.at_deck(point, interp=False)
                if wheel_z < 0 and i == 302:
                    log(
                        c,
                        f"z = {wheel_z}, i = 302, partial[i][j] = {partial[i][j]}",
                    )
            i += 1
        assert i == c.il_num_loads
        print_i(f"Calculated unit load matrix for wheel track {wheel_z}")
        return partial

    # Calculate results in parallel.
    print_i("Calculating unit load matrix...")
    with multiprocessing.Pool(processes=len(wheel_zs)) as pool:
        partial_results = pool.map(ulm_partial, wheel_zs)
    # And insert into the unit load matrix.
    unit_load_matrix = np.empty((len(wheel_zs) * c.il_num_loads, len(points)))
    for w_ind in range(len(wheel_zs)):
        row_ind = w_ind * c.il_num_loads
        unit_load_matrix[row_ind : row_ind + c.il_num_loads] = partial_results[w_ind]
    # Divide by unit load, so the value at a cell is the response to 1 kN.
    unit_load_matrix /= c.il_unit_load_kn
    with open(filepath, "wb") as f:
        np.save(f, unit_load_matrix)
    return unit_load_matrix
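# A minimal, standalone sketch of the parallel assembly in 'load_ulm' above: a
# worker returns one block of rows per wheel track, a multiprocessing.Pool maps
# over the tracks, and the blocks are stacked into one matrix. The block
# contents and sizes here are dummies; the real worker runs unit load simulations.
import multiprocessing

import numpy as np

_ROWS_PER_TRACK = 4  # stands in for c.il_num_loads
_NUM_POINTS = 3  # stands in for len(points)


def _dummy_block(track_index):
    """Dummy (rows x points) block for one wheel track."""
    return np.full((_ROWS_PER_TRACK, _NUM_POINTS), float(track_index))


if __name__ == "__main__":
    _tracks = [0, 1, 2, 3]
    with multiprocessing.Pool(processes=len(_tracks)) as _pool:
        _blocks = _pool.map(_dummy_block, _tracks)
    # Shape: (len(_tracks) * _ROWS_PER_TRACK, _NUM_POINTS).
    _ulm = np.vstack(_blocks)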
def crack_time_series(
    c: Config,
    traffic_array,
    traffic_array_mins: float,
    sensor: Point,
    crack_frac: float,
    damage,
    temps: List[float],
    solar: List[float],
):
    """Time series of sensor responses: vertical translation and strain XXB.

    Returns a NumPy array of dimensions (2 x len(traffic_array)).

    Args:
        c: Config, global configuration object.
        traffic_array: TrafficArray, traffic flowing over the bridge.
        traffic_array_mins: float, minutes of the traffic flow.
        sensor: Point, point at which to collect responses.
        crack_frac: float, fraction of the time series where the crack occurs.
        damage: DamageScenario, scenario that occurs at crack_frac.
        temps: List[float], list of air temperatures, per temperature minute.
        solar: List[float], list of solar radiance, per temperature minute.

    """
    assert 0 <= crack_frac <= 1
    response_types = [ResponseType.YTranslation, ResponseType.Strain]
    half_i = int(len(traffic_array) * crack_frac)
    traffic_array_0, traffic_array_1 = traffic_array[:half_i], traffic_array[half_i:]
    assert len(traffic_array_0) + len(traffic_array_1) == len(traffic_array)
    half_t = int(len(temps) * crack_frac)
    assert len(temps) == len(solar)

    # Get the effect of temperature for both response types and damage scenarios.
    # In each case we have a full day's worth of temperature responses.
    temp_effect = []
    for response_type in response_types:
        temp_effect_damages = []
        for di, ds in enumerate([HealthyDamage(), damage]):
            bots_tops, new_temp_effect = temperature.effect(
                c=ds.use(c)[0],
                response_type=response_type,
                points=[sensor],
                # One hour of temperature data per minute of traffic data.
                len_per_hour=int(len(traffic_array) / traffic_array_mins)
                if di == 0
                else None,
                temps=temps if di == 0 else None,
                solar=solar if di == 0 else None,
                temps_bt=bots_tops.T[int(len(bots_tops.T) / 2) :].T
                if di == 1
                else None,
                ret_temps_bt=True,
            )
            bots_tops = np.array(bots_tops)
            temp_effect_damages.append(
                new_temp_effect[0]
                if di == 1
                else new_temp_effect[0][: int(len(new_temp_effect[0]) / 2)]
            )
        temp_effect.append(np.concatenate(temp_effect_damages))
        print(f"len(temps) = {len(temps)}")
        print(f"len_per_hour = {int(len(traffic_array) / traffic_array_mins)}")
        print(f"Temperature shape = {temp_effect[-1].shape}")
        plt.plot(temp_effect[-1])
        plt.savefig(
            c.get_image_path("crack", safe_str(f"save-temps-{response_type}.pdf"))
        )
        plt.close()

    responses = []
    for ri, rt in enumerate(response_types):
        responses_healthy_cracked = []
        for ds, ta in [(HealthyDamage(), traffic_array_0), (damage, traffic_array_1)]:
            print(f"Sections in damage scenario = {len(ds.use(c)[0].bridge.sections)}")
            responses_healthy_cracked.append(
                responses_to_traffic_array(
                    c=c,
                    traffic_array=ta,
                    response_type=rt,
                    damage_scenario=ds,
                    points=[sensor],
                ).T[0]
            )  # Responses from a single point.
        responses.append(np.concatenate(responses_healthy_cracked))
        print(f"Shape of responses without temperature = {responses[-1].shape}")
        print(f"Shape of temperature effect = {temp_effect[ri].shape}")
        if rt == ResponseType.Strain:
            responses[ri] = resize_units("")[0](responses[ri])
        responses[ri] += temperature.apply(temp_effect[ri], responses[ri])
    responses = np.array(responses)
    print(f"Responses shape = {responses.shape}")
    return responses
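# A minimal, standalone sketch of how 'crack_time_series' composes its output:
# the first 'crack_frac' of the series comes from the healthy model, the rest
# from the damaged model, and a temperature effect is superimposed. How
# 'temperature.apply' resamples the effect is not shown here; np.interp is used
# as a stand-in, which is an assumption.
import numpy as np


def _spliced_series(healthy, damaged, temp_effect, crack_frac=0.5):
    """Concatenate healthy/damaged halves and add a resampled temperature effect.

    E.g. _spliced_series(np.zeros(1000), np.ones(1000), np.sin(np.linspace(0, 6, 24))).
    """
    half_i = int(len(healthy) * crack_frac)
    spliced = np.concatenate([healthy[:half_i], damaged[half_i:]])
    resampled = np.interp(
        np.linspace(0, 1, len(spliced)),
        np.linspace(0, 1, len(temp_effect)),
        temp_effect,
    )
    return spliced + resampled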
def plot_mmm_strain_convergence(
    c: Config,
    pier: int,
    df: pd.DataFrame,
    all_strains: Dict[float, Responses],
    title: str,
    without: Optional[Callable[[Point], bool]] = None,
    append: Optional[str] = None,
):
    """Plot convergence of given responses as model size grows."""
    # A grid of points 1m apart, over which to calculate responses.
    grid = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, int(c.bridge.length)),
            np.linspace(c.bridge.z_min, c.bridge.z_max, int(c.bridge.width)),
        )
    ]
    # If requested, remove some values from the responses.
    if without is not None:
        grid = [point for point in grid if not without(point)]
        for msl, strains in all_strains.items():
            print(f"Removing points from strains with max_shell_len = {msl}")
            all_strains[msl] = strains.without(without)
    # Collect minimum, maximum and mean strain over all responses, and over the
    # grid. Iterate by decreasing max_shell_len.
    mins, maxes, means = [], [], []
    gmins, gmaxes, gmeans = [], [], []
    max_shell_lens = []
    for msl, strains in sorted(all_strains.items(), key=lambda kv: -kv[0]):
        max_shell_lens.append(msl)
        print_i(f"Gathering strains with max_shell_len = {msl}", end="\r")
        grid_strains = np.array(
            [strains.at_deck(point, interp=True) for point in grid]
        )
        gmins.append(scalar(np.min(grid_strains)))
        gmaxes.append(scalar(np.max(grid_strains)))
        gmeans.append(scalar(np.mean(grid_strains)))
        strains = np.array(list(strains.values()))
        mins.append(scalar(np.min(strains)))
        maxes.append(scalar(np.max(strains)))
        means.append(scalar(np.mean(strains)))
    print()

    # Normalize and plot the mins, maxes, and means.
    def normalize(ys):
        print(ys)
        return ys / np.mean(ys[-5:])

    mins, maxes, means = normalize(mins), normalize(maxes), normalize(means)
    gmins, gmaxes, gmeans = normalize(gmins), normalize(gmaxes), normalize(gmeans)
    deck_nodes = [df.at[msl, "deck-nodes"] for msl in max_shell_lens]
    pier_nodes = [df.at[msl, "pier-nodes"] for msl in max_shell_lens]
    num_nodes = np.array(deck_nodes) + np.array(pier_nodes)
    print(f"MSLs = {max_shell_lens}")
    print(f"num_nodes = {num_nodes}")

    # Plot all lines, for debugging.
    plt.landscape()
    plt.plot(num_nodes, mins, label="mins")
    plt.plot(num_nodes, maxes, label="maxes")
    plt.plot(num_nodes, means, label="means")
    plt.plot(num_nodes, gmins, label="gmins")
    plt.plot(num_nodes, gmaxes, label="gmaxes")
    plt.plot(num_nodes, gmeans, label="gmeans")
    plt.grid(axis="y")
    plt.xlabel("Nodes in FEM")
    plt.ylabel("Strain")
    plt.title(title)
    plt.tight_layout()
    plt.legend()
    plt.savefig(
        c.get_image_path("convergence-pier-strain", f"mmm-{append}-all.pdf", acc=False)
    )
    plt.close()

    # Only plot some lines, for the thesis.
    plt.landscape()
    plt.plot(num_nodes, gmins, label="Minimum")
    plt.plot(num_nodes, gmaxes, label="Maximum")
    plt.plot(num_nodes, gmeans, label="Mean")
    plt.grid(axis="y")
    plt.title(title)
    plt.xlabel("Nodes in FEM")
    plt.ylabel("Strain")
    plt.legend()
    plt.tight_layout()
    plt.savefig(
        c.get_image_path("convergence-pier-strain", f"mmm-{append}.pdf", acc=False)
    )
    plt.close()
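# A minimal, standalone sketch of the 'normalize' helper above: each convergence
# curve is divided by the mean of its last five values, so every curve tends to
# 1.0 as the mesh is refined and curves of different magnitude share a y-axis.
import numpy as np


def _normalize_to_tail(ys, tail=5):
    """Scale a convergence curve by the mean of its final 'tail' values.

    E.g. a curve converging to 4.0 becomes a curve converging to ~1.0.
    """
    ys = np.asarray(ys, dtype=float)
    return ys / np.mean(ys[-tail:])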
def comparison_plots_705(c: Config, run_only: bool, scatter: bool):
    """Make contour plots for all verification points on bridge 705."""
    # from classify.scenario.bridge import transverse_crack
    # c = transverse_crack().use(c)[0]
    positions = [
        # (52, -8.4, "a"),
        (34.95459, 26.24579 - 16.6, "a"),
        (51.25051, 16.6 - 16.6, "b"),
        (89.98269, 9.445789 - 16.6, "c"),
        (102.5037, 6.954211 - 16.6, "d"),
        # (34.95459, 29.22606 - 16.6, "a"),
        # (51.25051, 16.6 - 16.6, "b"),
        # (92.40638, 12.405 - 16.6, "c"),
        # (101.7649, 3.973938 - 16.6, "d"),
    ]
    diana_values = pd.read_csv("validation/diana-screenshots/min-max.csv")
    response_types = [ResponseType.YTranslation, ResponseType.Strain]
    # For each response type and loading position first create contour plots for
    # OpenSees. Then finally create subplots comparing to Diana.
    cmap = diana_cmap_r
    for load_x, load_z, label in positions:
        for response_type in response_types:
            # Setup the metadata.
            if response_type == ResponseType.YTranslation:
                rt_str = "displa"
                unit_str = "mm"
            elif response_type == ResponseType.Strain:
                rt_str = "strain"
                unit_str = "E-6"
            else:
                raise ValueError("Unsupported response type")
            row = diana_values[diana_values["name"] == f"{label}-{rt_str}"]
            dmin, dmax = float(row["dmin"]), float(row["dmax"])
            omin, omax = float(row["omin"]), float(row["omax"])
            amin, amax = max(dmin, omin), min(dmax, omax)
            levels = np.linspace(amin, amax, 16)

            # Create the OpenSees plot.
            loads = [
                PointLoad(
                    x_frac=c.bridge.x_frac(load_x),
                    z_frac=c.bridge.z_frac(load_z),
                    kn=100,
                )
            ]
            fem_responses = load_fem_responses(
                c=c,
                response_type=response_type,
                sim_runner=OSRunner(c),
                sim_params=SimParams(ploads=loads, response_types=response_types),
            )
            if run_only:
                continue
            title = (
                f"{response_type.name()} from a {loads[0].kn} kN point load at"
                + f"\nx = {load_x:.3f}m, z = {load_z:.3f}m, with "
            )
            save = lambda prefix: c.get_image_path(
                "validation/diana-comp",
                safe_str(f"{prefix}{response_type.name()}") + ".pdf",
            )
            top_view_bridge(c.bridge, piers=True, abutments=True)
            fem_responses = fem_responses.resize()
            sci_format = response_type == ResponseType.Strain
            plot_contour_deck(
                c=c,
                responses=fem_responses,
                ploads=loads,
                cmap=cmap,
                levels=levels,
                sci_format=sci_format,
                decimals=4,
                scatter=scatter,
            )
            plt.title(title + "OpenSees")
            plt.tight_layout()
            plt.savefig(save(f"{label}-"))
            plt.close()

            # Finally label/title the Diana plot.
            if label is not None:
                # First plot and clear, just to have the same colorbar.
                plot_contour_deck(
                    c=c, responses=fem_responses, ploads=loads, cmap=cmap, levels=levels
                )
                plt.cla()
                # Then plot the bridge and the Diana screenshot.
                top_view_bridge(c.bridge, piers=True, abutments=True)
                plt.imshow(
                    mpimg.imread(f"validation/diana-screenshots/{label}-{rt_str}.png"),
                    extent=(
                        c.bridge.x_min,
                        c.bridge.x_max,
                        c.bridge.z_min,
                        c.bridge.z_max,
                    ),
                )
                dmin_s = f"{dmin:.4e}" if sci_format else f"{dmin:.4f}"
                dmax_s = f"{dmax:.4e}" if sci_format else f"{dmax:.4f}"
                dabs_s = (
                    f"{abs(dmin - dmax):.4e}"
                    if sci_format
                    else f"{abs(dmin - dmax):.4f}"
                )
                for point, leg_label, color, alpha in [
                    ((load_x, load_z), f"{loads[0].kn} kN load", "r", 1),
                    ((0, 0), f"min = {dmin_s} {fem_responses.units}", "r", 0),
                    ((0, 0), f"max = {dmax_s} {fem_responses.units}", "r", 0),
                    ((0, 0), f"|min-max| = {dabs_s} {fem_responses.units}", "r", 0),
                ]:
                    plt.scatter(
                        [point[0]],
                        [point[1]],
                        label=leg_label,
                        marker="o",
                        color=color,
                        alpha=alpha,
                    )
                plt.legend()
                plt.title(title + "Diana")
                plt.xlabel("X position (m)")
                plt.ylabel("Z position (m)")
                plt.tight_layout()
                plt.savefig(save(f"{label}-diana-"))
                plt.close()
def make_shell_properties_top_view(
    c: Config,
    shells_name_: str,
    prop_name_: str,
    refined_: bool,
    outline: bool,
    lanes: bool,
):
    """Make plots of the shells in top view, coloured by material property."""
    original_c = c
    # For each damage scenario build the model and extract the shells.
    for damage_scenario, damage_name in zip(
        healthy_and_cracked_scenarios, [None, "cracked"]
    ):
        c, sim_params = damage_scenario.use(original_c)
        for ctx, ctx_name, refined in [
            (
                BuildContext(
                    add_loads=[Point(x=85, y=0, z=0)],
                    refinement_radii=[2, 1, 0.5],
                ),
                "refined",
                True,
            ),
            (None, "unrefined", False),
        ]:
            if refined != refined_:
                continue
            bridge_shells = get_bridge_shells(bridge=c.bridge, ctx=ctx)
            deck_shells = flatten(bridge_shells[0], Shell)
            pier_shells = flatten(bridge_shells[1], Shell)
            all_shells = pier_shells + deck_shells
            for shells_name, shells in [
                ("piers", pier_shells),
                ("deck", deck_shells),
            ]:
                if shells_name != shells_name_:
                    continue
                for prop_name, prop_units, prop_f in [
                    ("Mesh", "", None),
                    ("Thickness", "m", lambda s: np.around(s.thickness, 3)),
                    ("Density", "kg/m", lambda s: np.around(s.density, 3)),
                    ("Poisson's ratio", "m/m", lambda s: s.poissons),
                    ("Young's modulus", "MPa", lambda s: np.around(s.youngs, 1)),
                ]:
                    if prop_name_ not in prop_name.lower():
                        continue
                    for cmap in [parula_cmap, default_cmap]:

                        def top_view():
                            top_view_bridge(
                                bridge=c.bridge,
                                abutments=True,
                                piers=True,
                                lanes=lanes,
                                compass=prop_f is not None,
                            )

                        top_view()
                        shell_properties_top_view(
                            shells=shells,
                            prop_f=prop_f,
                            prop_units=prop_units,
                            cmap=cmap,
                            colorbar=prop_f is not None,
                            # label=prop_f is not None,
                            outline=outline,
                        )
                        top_view()
                        damage_str = "" if damage_name is None else f" ({damage_name})"
                        plt.title(
                            f"{prop_name} of bridge 705's {shells_name}{damage_str}"
                        )
                        plt.savefig(
                            c.get_image_path(
                                f"geometry/{shells_name}-shells-{ctx_name}-top-view",
                                safe_str(
                                    f"{prop_name}-{cmap.name}-outline-{outline}-lanes-{lanes}"
                                )
                                + ".pdf",
                            )
                        )
                        plt.close()
                        if prop_f is None:
                            break
def time_series_plot(c: Config, n: float):
    """Plot 24-minute time series of cracking, for multiple cracked bridges.

    For each bridge (hard-coded), a time series of strain responses is plotted.
    Each bridge is initially in healthy condition, and the crack occurs halfway
    through the time series.

    Args:
        n: float, meters in front of the crack zone where to place the sensor.

    """
    # First construct one day (24 minutes) of traffic.
    total_mins = 24
    total_seconds = total_mins * 60
    traffic_scenario = normal_traffic(c=c, lam=5, min_d=2)
    traffic_sequence, traffic, traffic_array = load_traffic(
        c=c,
        traffic_scenario=traffic_scenario,
        max_time=total_seconds,
    )

    # Temperatures for one day.
    temps_day = temperature.from_to_mins(
        temperature.load("holly-springs"),
        datetime.fromisoformat("2019-07-03T00:00"),
        datetime.fromisoformat("2019-07-03T23:59"),
    )
    print(f"len temps = {len(temps_day['solar'])}")
    print(f"len temps = {len(temps_day['temp'])}")

    # Then generate some cracking time series.
    damages = [
        HealthyDamage(),
        transverse_crack(),
        transverse_crack(length=14.0, at_x=48.0),
    ]
    sensors = [
        Point(x=52, z=-8.4),  # Sensor in middle of the lane.
        Point(x=damages[1].crack_area(c.bridge)[0] - n, z=-8.4),  # In front of crack zone.
        Point(x=damages[2].crack_area(c.bridge)[0] - n, z=-8.4),  # In front of crack zone.
    ]
    [print(f"Sensor {i} = {sensors[i]}") for i in range(len(sensors))]
    time_series = [
        crack_time_series(
            c=c,
            traffic_array=traffic_array,
            traffic_array_mins=total_mins,
            sensor=sensor,
            crack_frac=0.5,
            damage=damage,
            temps=temps_day["temp"],
            solar=temps_day["solar"],
        )
        for damage, sensor in zip(damages, sensors)
    ]
    plt.portrait()
    for i, (y_trans, strain) in enumerate(time_series):
        x = np.arange(len(strain)) * c.sensor_hz / 60
        x_m = sensors[i].x
        damage_str = "Healthy Bridge"
        if i == 1:
            damage_str = "0.5 m crack zone"
        if i == 2:
            damage_str = "14 m crack zone"
        plt.subplot(len(time_series), 2, i * 2 + 1)
        plt.plot(x, y_trans * 1000, color="tab:blue")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m\n{damage_str}")
        plt.ylabel("Y trans. (mm)")
        plt.subplot(len(time_series), 2, i * 2 + 2)
        plt.plot(x, strain * 1e6, color="tab:orange")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m,\n{damage_str}")
        plt.ylabel("Microstrain XXB")
    plt.tight_layout()
    plt.savefig(c.get_image_path("crack", "time-series-q5.pdf"))
    plt.close()