def make_available_sensors_plot(
    c: Config, pier_radius: float, track_radius: float, edge_radius: float
):
    """Scatter plot of sensors used for classification."""
    top_view_bridge(c.bridge, abutments=True, piers=True, compass=False)
    plot_deck_sensors(
        c=c,
        without=without.points(
            c=c,
            pier_radius=pier_radius,
            track_radius=track_radius,
            edge_radius=edge_radius,
        ),
        label=True,
    )
    for l_i, load in enumerate([Point(x=21, z=-8.4), Point(x=33, z=-4)]):
        plt.scatter(
            [load.x],
            [load.z],
            color="red",
            marker="o",
            s=50,
            label="Sensor of interest" if l_i == 0 else None,
        )
    legend_marker_size(plt.legend(), 50)
    plt.title("Sensors available for classification on Bridge 705")
    plt.tight_layout()
    plt.savefig(c.get_image_path("sensors", "unavailable-sensors.pdf"))
    plt.close()
def plot_deck_sensors(c: Config, without: Callable[[Point], bool], label: bool = False):
    """Scatter plot of deck sensors."""
    deck_nodes, _ = get_bridge_nodes(c.bridge)
    deck_nodes = det_nodes(deck_nodes)
    # Partition the deck nodes into unavailable and available sensors.
    unavail_nodes = []
    avail_nodes = []
    for node in deck_nodes:
        if without(Point(x=node.x, y=node.y, z=node.z)):
            unavail_nodes.append(node)
        else:
            avail_nodes.append(node)
    plt.scatter(
        [node.x for node in avail_nodes],
        [node.z for node in avail_nodes],
        s=5,
        color="#1f77b4",
    )
    plt.scatter(
        [node.x for node in unavail_nodes],
        [node.z for node in unavail_nodes],
        color="#ff7f0e",
        s=5,
    )
    if label:
        plt.scatter(
            [avail_nodes[0].x],
            [avail_nodes[0].z],
            color="#1f77b4",
            label="Available",
            s=5,
        )
        plt.scatter(
            [unavail_nodes[0].x],
            [unavail_nodes[0].z],
            color="#ff7f0e",
            label="Unavailable",
            s=5,
        )
        legend = plt.legend()
        legend_marker_size(legend, 50)
def convert_sim_translation_responses(
    nodes: List[Node],
    sim_ind: int,
    response_type: ResponseType,
    parsed_sim_responses: Dict[ResponseType, List[List[float]]],
    converted_expt_responses: Dict[int, Dict[ResponseType, List["Response"]]],
):
    """Convert parsed simulation translation responses to List[Response].

    The converted responses are entered into the given dictionary.
    """
    # If the requested response type is not available do nothing.
    # TODO: Should we not raise an Error?
    if response_type not in parsed_sim_responses:
        return
    parsed_sim_trans_responses = parsed_sim_responses[response_type]
    result = []  # The List[Response] that we are converting to.
    # For each time step in the simulation.
    for time in range(len(parsed_sim_trans_responses)):
        # For each collected response at that time. The node index resets per
        # time step, since responses are recorded at the same nodes at every
        # time step.
        for node_index in range(len(parsed_sim_trans_responses[time])):
            node = nodes[node_index]
            result.append(
                (
                    parsed_sim_trans_responses[time][node_index],
                    Point(x=node.x, y=node.y, z=node.z),
                )
            )
    converted_expt_responses[sim_ind][response_type] = result
def cracked_concrete_plots(c: Config):
    """Contour plots of the cracked concrete scenario."""
    response_type = ResponseType.YTranslation
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # Collect responses to normal traffic on the cracked scenario.
    response_array = responses_to_traffic_array(
        c=c,
        traffic_array=load_normal_traffic_array(c)[0],
        response_type=response_type,
        bridge_scenario=cracked_scenario,
        points=points,
        sim_runner=OSRunner,
    )
    for t in range(len(response_array)):
        top_view_bridge(c.bridge, abutments=True, piers=True)
        responses = Responses.from_responses(
            response_type=response_type,
            responses=[(response_array[t][p], point) for p, point in enumerate(points)],
        )
        plot_contour_deck(c=c, responses=responses, center_norm=True)
        plt.title("Cracked Concrete")
        plt.savefig(c.get_image_path("cracked-scenario", f"cracked-time-{t}"))
        plt.close()
def make_node_plots(original_c: Config):
    """Make all variations of 3d scatter plots of nodes."""
    for damage_scenario in healthy_and_cracked_scenarios:
        c, sim_params = damage_scenario.use(original_c, SimParams([]))
        for ctx, ctx_name in [
            (BuildContext(add_loads=[Point(x=85, y=0, z=0)]), "refined"),
            (None, "unrefined"),
        ]:
            bridge_nodes = get_bridge_nodes(bridge=c.bridge, ctx=ctx)
            deck_nodes = set(flatten(bridge_nodes[0], Node))
            pier_nodes = set(flatten(bridge_nodes[1], Node))
            all_nodes = set(flatten(bridge_nodes, Node))
            # For each combination of parameters plot the nodes.
            for nodes_name, nodes in [
                ("all", all_nodes),
                ("deck", deck_nodes),
                ("pier", pier_nodes),
            ]:
                node_scatter_3d(nodes=nodes)
                plt.title(f"Nodes of {c.bridge.name}")
                plt.savefig(
                    c.get_image_path(
                        f"geometry/nodes-{ctx_name}",
                        safe_str(f"{nodes_name}") + ".pdf",
                    )
                )
                plt.close()
def temperature_effect_date(c: Config, month: str, vert: bool):
    """Plot temperature, and its effect at a point, for one month of data."""
    temp = __init__.load(name=month)
    point = Point(x=51, y=0, z=-8.4)
    plt.landscape()

    def plot_hours():
        if not vert:
            return
        label_set = False
        for dt in temp["datetime"]:
            if np.isclose(float(dt.hour + dt.minute), 0):
                label = None
                if not label_set:
                    label = "Time at vertical line = 00:00"
                    label_set = True
                plt.axvline(x=dt, linewidth=1, color="black", label=label)

    # Plot the temperature.
    plt.subplot(2, 1, 1)
    plot_hours()
    plt.scatter(
        temp["datetime"],
        temp["temp"],
        c=temp["missing"],
        cmap=mpl.cm.get_cmap("bwr"),
        s=1,
    )
    plt.ylabel("Temperature (°C)")
    plt.xlabel("Date")
    plt.gcf().autofmt_xdate()
    plt.title(f"Temperature in {str(month[0]).upper()}{month[1:]}")
    plt.legend()

    # Plot the effect at a point.
    response_type = ResponseType.YTranslation
    plt.subplot(2, 1, 2)
    plot_hours()
    effect = __init__.effect(
        c=c, response_type=response_type, points=[point], temps=temp["temp"]
    )[0]
    plt.scatter(
        temp["datetime"],
        effect * 1000,
        c=temp["missing"],
        cmap=mpl.cm.get_cmap("bwr"),
        s=1,
    )
    plt.ylabel(f"{response_type.name()} (mm)")
    plt.xlabel("Date")
    plt.gcf().autofmt_xdate()
    plt.title(f"{response_type.name()} to unit thermal loading in {month}")

    # Save.
    plt.tight_layout()
    plt.savefig(c.get_image_path("classify/temperature", f"{month}.png"))
    plt.savefig(c.get_image_path("classify/temperature", f"{month}.pdf"))
    plt.close()
def pairwise_cluster(c: Config, load: bool):
    """Cluster pairwise maps from healthy and damaged scenarios."""
    features_path = c.get_data_path("features", "pairwise-cluster", bridge=False)
    if not load:
        normal_traffic_array, _ = load_normal_traffic_array(c=c, mins=24)
        normal_traffic_array = normal_traffic_array[
            int(len(normal_traffic_array) / 24):
        ]
        response_type = ResponseType.YTranslation
        grid_points = [
            Point(x=x, y=0, z=-9.65)
            for x, _ in itertools.product(
                np.linspace(c.bridge.x_min, c.bridge.x_max, 50),
                # np.linspace(c.bridge.x_min, c.bridge.x_max, 4),
                [1],
            )
        ]
        # Collect a list of features per damage scenario.
        features = []
        for damage_scenario in healthy_and_cracked_scenarios[1:]:
            damage_c = damage_scenario.use(c)
            responses = responses_to_traffic_array(
                c=damage_c,
                traffic_array=normal_traffic_array,
                response_type=response_type,
                bridge_scenario=damage_scenario,
                points=grid_points,
                sim_runner=OSRunner,
            ).T
            ks_values = []
            for p0_i, point0 in enumerate(grid_points):
                print_i(f"Point {p0_i + 1} / {len(grid_points)}", end="\r")
                ks_values.append([])
                for p1_i, point1 in enumerate(grid_points):
                    ks = ks_no_outliers(responses[p0_i], responses[p1_i])
                    ks_values[-1].append(ks)
            features.append((ks_values, damage_scenario.name))
        # Save features to disk.
        features = np.array(features)
        np.save(features_path, features)

    features = np.load(features_path)
    # Reduce each pairwise map to a sum per sensor.
    for f_i, (feature, feature_name) in enumerate(features):
        features[f_i] = ([sum(sensor) for sensor in feature], feature_name)
    # Cluster the reduced pairwise maps.
    from sklearn.cluster import KMeans

    kmeans = KMeans(n_clusters=2)
    kmeans.fit(features)
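# The pairwise-map construction above can be checked in isolation. A minimal
# sketch, assuming synthetic data in place of the simulation responses: it
# builds the same sensor-by-sensor matrix using scipy's two-sample KS
# statistic directly (the project's 'ks_no_outliers' additionally strips
# outliers before comparing).
import numpy as np
from scipy.stats import ks_2samp


def pairwise_ks_matrix(signals: np.ndarray) -> np.ndarray:
    """KS statistic between every pair of rows (sensors) in 'signals'."""
    n = len(signals)
    ks = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            ks[i, j] = ks_2samp(signals[i], signals[j]).statistic
    return ks


# Example: 5 synthetic "sensors", 1000 samples each.
rng = np.random.default_rng(0)
demo_signals = rng.normal(size=(5, 1000))
print(pairwise_ks_matrix(demo_signals).round(3))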
def without(self, remove: Callable[[Point], bool]) -> "Responses":
    """A copy of these responses, without those at points to remove."""
    responses = []
    for x, y_dict in self.responses[self.times[0]].items():
        for y, z_dict in y_dict.items():
            for z, response in z_dict.items():
                p = Point(x=x, y=y, z=z)
                if not remove(p):
                    responses.append((response, p))
    return Responses(
        response_type=self.response_type, responses=responses, units=self.units
    )
def center(self) -> Point:
    """Point at the center of the element."""
    if not hasattr(self, "_center"):
        node_0 = self.nodes_by_id[self.ni_id]
        node_1 = self.nodes_by_id[self.nk_id]
        delta_x = abs(node_0.x - node_1.x)
        delta_y = abs(node_0.y - node_1.y)
        delta_z = abs(node_0.z - node_1.z)
        min_x = min(node_0.x, node_1.x)
        min_y = min(node_0.y, node_1.y)
        min_z = min(node_0.z, node_1.z)
        self._center = Point(
            x=min_x + delta_x / 2, y=min_y + delta_y / 2, z=min_z + delta_z / 2
        )
    return self._center
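# The cached '_center' above is the midpoint of two diagonally opposite
# nodes. A minimal, self-contained sketch of the same computation; the
# '_DemoPoint' class here is a stand-in for the project's Point class.
from dataclasses import dataclass


@dataclass
class _DemoPoint:
    x: float
    y: float
    z: float


def midpoint(a: _DemoPoint, b: _DemoPoint) -> _DemoPoint:
    """Midpoint of two points, equivalent to min + delta / 2 per axis."""
    return _DemoPoint(x=(a.x + b.x) / 2, y=(a.y + b.y) / 2, z=(a.z + b.z) / 2)


print(midpoint(_DemoPoint(0, 0, 0), _DemoPoint(2, 1, -3)))  # (1.0, 0.5, -1.5)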
def make_shell_properties_3d(original_c: Config):
    """Make plots of the shells in 3D, coloured by material property."""
    # For each damage scenario build the model and extract the shells.
    for damage_scenario in healthy_and_cracked_scenarios:
        c, sim_params = damage_scenario.use(original_c, SimParams([]))
        for ctx, ctx_name in [
            (BuildContext(add_loads=[Point(x=85, y=0, z=0)]), "refined"),
            (None, "unrefined"),
        ]:
            bridge_shells = get_bridge_shells(bridge=c.bridge, ctx=ctx)
            deck_shells = flatten(bridge_shells[0], Shell)
            pier_shells = flatten(bridge_shells[1], Shell)
            all_shells = flatten(bridge_shells, Shell)
            # For each combination of parameters plot the shells.
            for shells_name, shells in [
                ("pier", pier_shells),
                ("all", all_shells),
                ("deck", deck_shells),
            ]:
                for outline, label in itertools.product([True, False], [True, False]):
                    for prop_name, prop_units, prop_f in [
                        ("Thickness", "m", lambda s: s.thickness),
                        ("Density", "kg/m", lambda s: s.density),
                        ("Poisson's ratio", "m/m", lambda s: s.poissons),
                        ("Young's modulus", "MPa", lambda s: s.youngs),
                    ]:
                        for cmap in [default_cmap, get_cmap("tab10")]:
                            shell_properties_3d(
                                shells=shells,
                                prop_units=prop_units,
                                prop_f=prop_f,
                                cmap=cmap,
                                outline=outline,
                                label=label,
                                colorbar=not label,
                            )
                            plt.title(f"{prop_name} of {c.bridge.name}")
                            plt.savefig(
                                c.get_image_path(
                                    f"geometry/shells-{ctx_name}-3d",
                                    safe_str(
                                        f"{shells_name}-{prop_name}"
                                        f"-outline-{outline}-{cmap.name}"
                                    )
                                    + ".pdf",
                                )
                            )
                            plt.close()
def traffic_response_plots(c: Config, times: int = 3):
    """Response to normal traffic per damage scenario at multiple time steps."""
    response_type = ResponseType.YTranslation
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # for damage_scenario in all_scenarios(c):
    for damage_scenario in [unit_temp_scenario]:
        response_array = responses_to_traffic_array(
            c=c,
            traffic_array=load_normal_traffic_array(c, mins=1)[0],
            response_type=response_type,
            bridge_scenario=damage_scenario,
            points=points,
            sim_runner=OSRunner,
        )
        print(response_array.shape)
        mean_response_array = np.mean(response_array, axis=0).T
        print(mean_response_array.shape)
        for t in range(times):
            time_index = -1 + abs(t)
            top_view_bridge(c.bridge, abutments=True, piers=True)
            responses = Responses.from_responses(
                response_type=response_type,
                responses=[
                    (response_array[time_index][p], point)
                    for p, point in enumerate(points)
                ],
            )
            plot_contour_deck(c=c, responses=responses, center_norm=True, levels=100)
            plt.title(damage_scenario.name)
            plt.savefig(
                c.get_image_path(
                    "contour-traffic-response",
                    f"{damage_scenario.name}-time={time_index}",
                )
            )
            plt.close()
def oneclass(c: Config):
    """Fit a one-class SVM per sensor on the healthy scenario, then predict."""
    normal_traffic_array, traffic_scenario = load_normal_traffic_array(c)
    bridge_scenarios = [HealthyScenario()] + each_pier_scenarios(c)
    response_type = ResponseType.YTranslation
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max / 2, 20),
            np.linspace(c.bridge.z_min, c.bridge.z_max / 2, 3),
        )
    ]
    results = []
    for b, bridge_scenario in enumerate(bridge_scenarios):
        print_i(f"One class: bridge scenario {bridge_scenario.name}")
        responses = responses_to_traffic_array(
            c=c,
            traffic_array=normal_traffic_array,
            response_type=response_type,
            bridge_scenario=bridge_scenario,
            points=points,
            fem_runner=OSRunner(c),
        ).T
        print(len(normal_traffic_array))
        print(responses.shape)
        # Fit on the healthy scenario.
        if b == 0:
            assert len(responses) == len(points)
            clfs = []
            for r, rs in enumerate(responses):
                print_i(f"Training classifier {r} / {len(responses)}")
                clfs.append(OneClassSVM().fit(rs.reshape(-1, 1)))
        scenario_results = []
        for p, _ in enumerate(points):
            print_i(f"Predicting points {p} / {len(points)}")
            prediction = clfs[p].predict(responses[p].reshape(-1, 1))
            print(prediction)
            print(len(prediction[prediction < 0]))
            print(len(prediction[prediction > 0]))
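# A minimal sketch of the per-sensor one-class approach used above, on
# synthetic data rather than simulation responses: fit one OneClassSVM per
# sensor on "healthy" samples, then count inliers (+1) and outliers (-1) on
# new data. The sensor counts and distributions here are illustrative only.
import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.default_rng(0)
healthy = rng.normal(0, 1, size=(3, 500))    # 3 sensors, 500 healthy samples each.
shifted = rng.normal(0.5, 1, size=(3, 500))  # Same sensors, shifted "damaged" data.

clfs = [OneClassSVM(nu=0.05).fit(h.reshape(-1, 1)) for h in healthy]
for s, (clf, data) in enumerate(zip(clfs, shifted)):
    pred = clf.predict(data.reshape(-1, 1))
    print(f"sensor {s}: {np.sum(pred < 0)} outliers of {len(pred)}")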
def gradient_pier_displacement_plot(
    c: Config,
    pier_disp: PierSettlementScenario,
    response_type: ResponseType,
    title: str,
):
    """Contour plot of piers displaced in an increasing gradient."""
    # 10 x 10 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 10),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 10),
        )
    ]
    # Create an empty traffic array and collect responses.
    response_array = responses_to_traffic_array(
        c=c,
        traffic_array=np.zeros((1, len(c.bridge.wheel_tracks(c)) * c.il_num_loads)),
        response_type=response_type,
        bridge_scenario=pier_disp,
        points=points,
        fem_runner=OSRunner(c),
    )
    top_view_bridge(c.bridge, abutments=True, piers=True)
    responses = Responses.from_responses(
        response_type=response_type,
        responses=[(response_array[0][p], point) for p, point in enumerate(points)],
    )
    plot_contour_deck(c=c, responses=responses, center_norm=True)
    plt.title(title)
    plt.savefig(
        c.get_image_path("pier-scenarios", f"pier-displacement-{safe_str(title)}")
    )
    plt.close()
def animate_mv_vehicle(
    c: Config,
    mv_vehicle: Vehicle,
    response_type: ResponseType,
    fem_runner: FEMRunner,
    num_x_fracs: int = 100,
    per_axle: bool = False,
    save: str = None,
    show: bool = False,
):
    """Animate the bridge's response to a moving vehicle."""
    times = list(times_on_bridge(c=c, mv_vehicles=[mv_vehicle]))
    at = [Point(x=c.bridge.x(x_frac)) for x_frac in np.linspace(0, 1, num_x_fracs)]
    responses = responses_to_mv_vehicles(
        c=c,
        mv_vehicles=[mv_vehicle],
        response_types=[response_type],
        fem_runner=fem_runner,
        times=times,
        at=at,
        per_axle=per_axle,
    )
    # Reshape to remove the singleton response type and vehicle dimensions.
    new_shape = [d for d in responses.shape if d != 1]
    responses = responses.reshape(new_shape)
    animate_bridge_response(
        c=c,
        responses=[responses],
        response_type=response_type,
        mv_vehicles=[mv_vehicle],
        save=save,
        show=show,
    )
def run_ulm(c: Config, healthy: bool, cracked: bool, x_i: int, z_i: int):
    """Run all unit load simulations.

    Args:
        x_i: int, index into the wheel track x positions.
        z_i: int, index into the wheel track z positions.

    """
    response_type = ResponseType.YTranslation
    wheel_xs = c.bridge.wheel_track_xs(c)
    wheel_x = wheel_xs[x_i]
    wheel_zs = c.bridge.wheel_track_zs(c)
    wheel_z = wheel_zs[z_i]
    print_i(f"Wheel (x, z) = ({wheel_x}, {wheel_z})")
    point = Point(x=wheel_x, y=0, z=wheel_z)
    if healthy:
        ULResponses.load_ulm(
            c=c,
            response_type=response_type,
            points=[point],
            sim_runner=OSRunner(c),
        )
    if cracked:
        c = transverse_crack().use(c)[0]
        ULResponses.load_ulm(
            c=c,
            response_type=response_type,
            points=[point],
            sim_runner=OSRunner(c),
        )
def convert_strain_responses(
    elements: List[Shell],
    sim_ind: int,
    parsed_sim_responses: Dict[ResponseType, List[List[float]]],
    converted_expt_responses: Dict[int, Dict[ResponseType, List["Response"]]],
):
    """Convert parsed strain responses into per-point Response lists."""
    if not any(rt.is_strain() or rt.is_stress() for rt in parsed_sim_responses):
        return
    parsed_sim_strain = parsed_sim_responses[ResponseType.StrainXXB]
    result_bottom, result_bottom_z, result_top = [], [], []
    print_w("Elements belonging to piers will not have strain recorded")
    print_w("Strain responses are specified to be at y=0, but recorded lower")
    # For each integration point..
    assert len(parsed_sim_strain) == 4
    for i_point in range(4):
        # ..consider the responses for each element.
        assert len(elements) == len(parsed_sim_strain[i_point])
        for element, el_responses in zip(elements, parsed_sim_strain[i_point]):
            # Skip any elements belonging to the pier.
            if element.pier:
                continue
            # First calculate the center offset of the integration points..
            if not hasattr(element, "i_point_offset"):
                element.i_point_offset = (
                    element.length() / 2 * (1 / np.sqrt(3)),
                    element.width() / 2 * (1 / np.sqrt(3)),
                )
            i_point_x_offset, i_point_z_offset = element.i_point_offset
            # ..then determine the position of each integration point.
            response_point = deepcopy(element.center())
            if i_point + 1 == 1:
                response_point.x -= i_point_x_offset
                response_point.z -= i_point_z_offset
            elif i_point + 1 == 2:
                response_point.x += i_point_x_offset
                response_point.z -= i_point_z_offset
            elif i_point + 1 == 3:
                response_point.x += i_point_x_offset
                response_point.z += i_point_z_offset
            elif i_point + 1 == 4:
                response_point.x -= i_point_x_offset
                response_point.z += i_point_z_offset
            else:
                raise ValueError(f"Unknown integration point {i_point + 1}")
            # Calculate and record the response at the bottom and top fibres.
            eps11, eps22, _g12, theta11, theta22, theta33, _g13, _g23 = list(
                el_responses
            )
            half_height = element.section.thickness / 2
            result_bottom.append(
                (
                    (eps11 - (theta11 * half_height)) * -1e6,
                    Point(x=response_point.x, y=response_point.y, z=response_point.z),
                )
            )
            result_bottom_z.append(
                (
                    (eps22 - (theta22 * half_height)) * -1e6,
                    Point(x=response_point.x, y=response_point.y, z=response_point.z),
                )
            )
            result_top.append(
                (
                    (eps11 + (theta11 * half_height)) * -1e6,
                    Point(x=response_point.x, y=response_point.y, z=response_point.z),
                )
            )
    converted_expt_responses[sim_ind][ResponseType.StrainXXB] = result_bottom
    converted_expt_responses[sim_ind][ResponseType.StrainXXT] = result_top
    converted_expt_responses[sim_ind][ResponseType.StrainZZB] = result_bottom_z
    print(len(result_bottom))
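# The top/bottom fibre strains above follow standard shell kinematics:
# eps(z) = eps_membrane ± curvature * (thickness / 2). A minimal numeric
# sketch of just that step; the values are made up, and the -1e6 factor is
# the project's conversion to microstrain with its sign convention.
import numpy as np

eps11 = 2.0e-6      # Membrane strain at the integration point.
theta11 = 1.5e-6    # Curvature about the local axis.
thickness = 0.75    # Shell thickness in meters.

half_height = thickness / 2
strain_bottom = (eps11 - theta11 * half_height) * -1e6
strain_top = (eps11 + theta11 * half_height) * -1e6
print(np.around([strain_bottom, strain_top], 3))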
def deck_points(self) -> List[Point]:
    """All the points on the deck where responses are collected."""
    return [
        Point(x=x, y=0, z=z)
        for _, (x, y, z) in self.values(point=True)
        if np.isclose(y, 0)
    ]
def plot_nesw_convergence(
    c: Config,
    df: pd.DataFrame,
    responses: Dict[float, Responses],
    point: Point,
    max_distance: float,
    from_: str,
):
    """Plot convergence of strain at different points around a load."""
    delta_distance = 0.05
    skip = 3
    # Create color mappable for distances.
    norm = matplotlib.colors.Normalize(vmin=0, vmax=max_distance)
    cmap = matplotlib.cm.get_cmap("jet")
    mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
    color = lambda d: mappable.to_rgba(d)
    # Direction of each compass point.
    compass_dir = {
        "N": (0, 1),
        "E": (1, 0),
        "S": (0, -1),
        "W": (-1, 0),
    }
    plt.square()
    fig, axes = plt.subplots(nrows=2, ncols=2)
    for ax, compass, compass_name in zip(
        axes.flat, ["N", "S", "E", "W"], ["North", "South", "East", "West"]
    ):
        # Collect responses at increasing distance in this direction.
        x_mul, z_mul = compass_dir[compass]
        for distance in np.arange(0, max_distance, step=delta_distance)[::skip]:
            dist_point = Point(
                x=point.x + (distance * x_mul),
                y=point.y,
                z=point.z + (distance * z_mul),
            )
            print(dist_point)
            if (
                dist_point.x < c.bridge.x_min
                or dist_point.x > c.bridge.x_max
                or dist_point.z < c.bridge.z_min
                or dist_point.z > c.bridge.z_max
            ):
                break
            line_responses = []
            for max_shell_len, sim_responses in responses.items():
                deck_nodes = float(df.at[max_shell_len, "deck-nodes"])
                pier_nodes = float(df.at[max_shell_len, "pier-nodes"])
                line_responses.append(
                    (
                        deck_nodes + pier_nodes,  # max_shell_len,
                        scalar(sim_responses.at_deck(dist_point, interp=True)),
                    )
                )
            line_responses = np.array(sorted(line_responses, key=lambda t: t[0])).T
            ax.plot(line_responses[0], line_responses[1], color=color(distance))
            if distance > max_distance:
                break
        ax.grid(axis="y")
        ax.set_title(
            f"Strain at increasing distance\nin direction {compass_name} from\n{from_}"
        )
        ax.set_xlabel("Nodes in FEM")
        ax.set_ylabel("Strain")
        # ax.set_xlim(ax.get_xlim()[1], ax.get_xlim()[0])
    plt.tight_layout()
    clb = plt.colorbar(mappable, ax=axes.ravel())
    clb.ax.set_title("Distance (m)")
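# The distance-to-colour mapping above is plain matplotlib. A minimal sketch,
# independent of the FEM data: normalize distances into [0, max_distance],
# colour each line by its distance, and attach a shared colorbar. The signal
# plotted here is arbitrary demo data.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

max_distance = 2.0
norm = matplotlib.colors.Normalize(vmin=0, vmax=max_distance)
mappable = matplotlib.cm.ScalarMappable(norm=norm, cmap=matplotlib.cm.get_cmap("jet"))

fig, ax = plt.subplots()
xs = np.linspace(0, 1, 50)
for distance in np.arange(0, max_distance, 0.25):
    ax.plot(xs, np.sin(xs * 10) / (1 + distance), color=mappable.to_rgba(distance))
clb = fig.colorbar(mappable, ax=ax)
clb.ax.set_title("Distance (m)")
plt.close(fig)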
def events(c: Config, x: float, z: float):
    """Plot events due to normal traffic."""
    point = Point(x=x, y=0, z=z)
    # 10 seconds of 'normal' traffic.
    max_time = 10
    traffic_scenario = normal_traffic(c=c, lam=5, min_d=2)
    # Create the 'TrafficSequence' and 'TrafficArray'.
    traffic_sequence = traffic_scenario.traffic_sequence(
        bridge=c.bridge, max_time=max_time
    )
    traffic_array = to_traffic_array(
        c=c, traffic_sequence=traffic_sequence, max_time=max_time
    )
    # Find when the simulation has warmed up, and when 'TrafficArray' begins.
    warmed_up_at = traffic_sequence[0][0].time_left_bridge(c.bridge)
    traffic_array_starts = (int(warmed_up_at / c.sensor_hz) + 1) * c.sensor_hz
    print(f"warmed up at = {warmed_up_at}")
    print(f"traffic_array_starts = {traffic_array_starts}")
    traffic_array_ends = traffic_array_starts + (len(traffic_array) * c.sensor_hz)
    print(f"traffic_array_ends = {traffic_array_ends}")
    point_lane_ind = c.bridge.closest_lane(z)
    vehicles = list(set(ts[0] for ts in traffic_sequence))
    print(len(vehicles))
    print(vehicles[0])
    vehicles = sorted(
        set(ts[0] for ts in traffic_sequence if ts[0].lane == point_lane_ind),
        key=lambda v: -v.init_x_frac,
    )
    print(len(vehicles))
    print(vehicles[0])
    event_indices = []
    vehicle_times = [v.time_at(x=x - 2, bridge=c.bridge) for v in vehicles]
    for v, t in zip(vehicles, vehicle_times):
        print(f"Vehicle {v.init_x_frac} {v.mps} at time {t}")
        start_time = int(t / c.sensor_hz) * c.sensor_hz
        print(f"start_time = {start_time}")
        ta_start_time = np.around(start_time - traffic_array_starts, 8)
        print(f"ta start time = {ta_start_time}")
        ta_start_index = int(ta_start_time / c.sensor_hz)
        print(f"ta start index = {ta_start_index}")
        ta_end_index = ta_start_index + int(c.event_time_s / c.sensor_hz)
        print(f"ta end index = {ta_end_index}")
        if ta_start_index >= 0 and ta_end_index < len(traffic_array):
            event_indices.append((ta_start_index, ta_end_index))
    print(event_indices)
    responses = (
        responses_to_traffic_array(
            c=c,
            traffic_array=traffic_array,
            response_type=ResponseType.YTranslation,
            damage_scenario=healthy_scenario,
            points=[point],
            sim_runner=OSRunner(c),
        )
        * 1000
    )
    # responses = add_displa_noise(responses)
    print(responses.shape)
    plt.portrait()
    for event_ind, (event_start, event_end) in enumerate(event_indices):
        plt.subplot(len(event_indices), 1, event_ind + 1)
        plt.plot(responses[event_start : event_end + 1])
    plt.tight_layout()
    plt.savefig(c.get_image_path("classify/events", "events.pdf"))
    plt.close()
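# The index arithmetic above (event time -> sample index into the
# 'TrafficArray') can be isolated. A minimal sketch with made-up numbers,
# assuming 'sensor_hz' is used as the sampling period in seconds, as in the
# divisions above:
import numpy as np

sensor_hz = 1 / 250      # Sampling period (s), illustrative value.
warmed_up_at = 7.3       # Time (s) at which the first vehicle leaves the bridge.
event_time_s = 2.0       # Duration (s) of one event.

traffic_array_starts = (int(warmed_up_at / sensor_hz) + 1) * sensor_hz


def event_indices(event_time: float):
    """Start and end sample indices of an event beginning at 'event_time' (s)."""
    start_time = int(event_time / sensor_hz) * sensor_hz
    ta_start_time = np.around(start_time - traffic_array_starts, 8)
    ta_start_index = int(ta_start_time / sensor_hz)
    return ta_start_index, ta_start_index + int(event_time_s / sensor_hz)


print(event_indices(12.0))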
def point_load_response_plots(
    c: Config, x: float, z: float, kn: int = 1000, run: bool = False
):
    """Response to a point load per damage scenario."""
    response_types = [ResponseType.YTranslation, ResponseType.Strain]
    # damage_scenarios = all_scenarios(c)
    damage_scenarios = [HealthyScenario(), transverse_crack()]
    # 30 x 100 grid of points on the bridge deck where to record responses.
    points = [
        Point(x=x_, y=0, z=z_)
        for x_, z_ in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 30),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 100),
        )
    ]
    for response_type in response_types:
        all_responses = []
        for damage_scenario in damage_scenarios:
            sim_params = SimParams(
                response_types=[response_type],
                ploads=[
                    PointLoad(
                        x_frac=c.bridge.x_frac(x), z_frac=c.bridge.z_frac(z), kn=kn
                    )
                ],
            )
            use_c, sim_params = damage_scenario.use(c=c, sim_params=sim_params)
            all_responses.append(
                load_fem_responses(
                    c=use_c,
                    sim_params=sim_params,
                    response_type=response_type,
                    sim_runner=OSRunner(use_c),
                    run=run,
                ).resize()
            )
        amin, amax = np.inf, -np.inf
        for sim_responses in all_responses:
            responses = np.array(list(sim_responses.values()))
            amin = min(amin, min(responses))
            amax = max(amax, max(responses))
        for d, damage_scenario in enumerate(damage_scenarios):
            top_view_bridge(c.bridge, abutments=True, piers=True)
            plot_contour_deck(
                c=c,
                responses=all_responses[d],
                levels=100,
                norm=colors.Normalize(vmin=amin, vmax=amax),
                decimals=10,
            )
            plt.title(damage_scenario.name)
            plt.tight_layout()
            plt.savefig(
                c.get_image_path(
                    "contour/point-load",
                    safe_str(
                        f"x-{x:.2f}-z-{z:.2f}-kn-{kn}"
                        f"-{response_type.name()}-{damage_scenario.name}"
                    )
                    + ".pdf",
                )
            )
            plt.close()
def make_shell_properties_top_view(
    c: Config,
    shells_name_: str,
    prop_name_: str,
    refined_: bool,
    outline: bool,
    lanes: bool,
):
    """Make plots of the shells in top view, coloured by material property."""
    original_c = c
    # For each damage scenario build the model and extract the shells.
    for damage_scenario, damage_name in zip(
        healthy_and_cracked_scenarios, [None, "cracked"]
    ):
        c, sim_params = damage_scenario.use(original_c)
        for ctx, ctx_name, refined in [
            (
                BuildContext(
                    add_loads=[Point(x=85, y=0, z=0)],
                    refinement_radii=[2, 1, 0.5],
                ),
                "refined",
                True,
            ),
            (None, "unrefined", False),
        ]:
            if refined != refined_:
                continue
            bridge_shells = get_bridge_shells(bridge=c.bridge, ctx=ctx)
            deck_shells = flatten(bridge_shells[0], Shell)
            pier_shells = flatten(bridge_shells[1], Shell)
            all_shells = pier_shells + deck_shells
            for shells_name, shells in [
                ("piers", pier_shells),
                ("deck", deck_shells),
            ]:
                if shells_name != shells_name_:
                    continue
                for prop_name, prop_units, prop_f in [
                    ("Mesh", "", None),
                    ("Thickness", "m", lambda s: np.around(s.thickness, 3)),
                    ("Density", "kg/m", lambda s: np.around(s.density, 3)),
                    ("Poisson's ratio", "m/m", lambda s: s.poissons),
                    ("Young's modulus", "MPa", lambda s: np.around(s.youngs, 1)),
                ]:
                    if prop_name_ not in prop_name.lower():
                        continue
                    for cmap in [parula_cmap, default_cmap]:

                        def top_view():
                            top_view_bridge(
                                bridge=c.bridge,
                                abutments=True,
                                piers=True,
                                lanes=lanes,
                                compass=prop_f is not None,
                            )

                        top_view()
                        shell_properties_top_view(
                            shells=shells,
                            prop_f=prop_f,
                            prop_units=prop_units,
                            cmap=cmap,
                            colorbar=prop_f is not None,
                            # label=prop_f is not None,
                            outline=outline,
                        )
                        top_view()
                        damage_str = "" if damage_name is None else f" ({damage_name})"
                        plt.title(
                            f"{prop_name} of bridge 705's {shells_name}{damage_str}"
                        )
                        plt.savefig(
                            c.get_image_path(
                                f"geometry/{shells_name}-shells-{ctx_name}-top-view",
                                safe_str(
                                    f"{prop_name}-{cmap.name}"
                                    f"-outline-{outline}-lanes-{lanes}"
                                )
                                + ".pdf",
                            )
                        )
                        plt.close()
                        if prop_f is None:
                            break
def experiment_noise(c: Config):
    """Plot displacement and strain noise from dynamic test 1."""
    ################
    # Displacement #
    ################
    plt.portrait()
    # Find points of each sensor.
    displa_labels = ["U13", "U26", "U29"]
    displa_points = []
    for displa_label in displa_labels:
        sensor_x, sensor_z = _displa_sensor_xz(displa_label)
        displa_points.append(Point(x=sensor_x, y=0, z=sensor_z))
    # For each sensor plot and estimate noise.
    side = 700
    for s_i, displa_label in enumerate(displa_labels):
        # First plot the signal, and the smoothed signal.
        plt.subplot(len(displa_points), 2, (s_i * 2) + 1)
        with open(f"validation/experiment/D1a-{displa_label}.txt") as f:
            data = list(map(float, f.readlines()))
        # Find the center of the plot, the minimum point in the first 15000 samples.
        data_center = 0
        for i in range(15000):
            if data[i] < data[data_center]:
                data_center = i
        data = data[data_center - side : data_center + side]
        smooth = savgol_filter(data, 31, 3)
        plt.plot(data, linewidth=1)
        plt.plot(smooth, linewidth=1)
        plt.ylim(-0.8, 0.3)
        plt.title(f"{displa_label} in dynamic test")
        # Then plot the noisy signal minus the smoothed signal.
        plt.subplot(len(displa_points), 2, (s_i * 2) + 2)
        noise = data - smooth
        plt.plot(noise, label=f"σ = {np.around(np.std(noise), 4)}")
        plt.legend()
        plt.title(f"Noise from {displa_label}")
    plt.tight_layout()
    plt.savefig(c.get_image_path("params", "noise-displa.pdf"))
    plt.close()

    ##########
    # Strain #
    ##########
    plt.portrait()
    # Find points of each sensor.
    strain_labels = ["T1", "T10", "T11"]
    strain_points = []
    for strain_label in strain_labels:
        sensor_x, sensor_z = _strain_sensor_xz(strain_label)
        strain_points.append(Point(x=sensor_x, y=0, z=sensor_z))
    # For each sensor plot and estimate noise.
    side = 700
    xmin, xmax = np.inf, -np.inf
    for s_i, strain_label in enumerate(strain_labels):
        # First plot the signal, and the smoothed signal.
        plt.subplot(len(strain_points), 2, (s_i * 2) + 1)
        with open(f"validation/experiment/D1a-{strain_label}.txt") as f:
            data = list(map(float, f.readlines()))
        # Find the center of the plot, the minimum point in the first 15000 samples.
        data_center = 0
        for i in range(15000):
            if data[i] < data[data_center]:
                data_center = i
        data = data[data_center - side : data_center + side]
        smooth = savgol_filter(data, 31, 3)
        plt.plot(data, linewidth=1)
        plt.plot(smooth, linewidth=1)
        plt.title(f"{strain_label} in dynamic test")
        # Then plot the noisy signal minus the smoothed signal.
        plt.subplot(len(strain_points), 2, (s_i * 2) + 2)
        noise = data - smooth
        plt.plot(noise, label=f"σ = {np.around(np.std(noise), 4)}")
        plt.legend()
        plt.title(f"Noise from {strain_label}")
    plt.tight_layout()
    plt.savefig(c.get_image_path("params", "noise-strain.pdf"))
    plt.close()
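# The noise estimate above is "signal minus its Savitzky-Golay smoothed
# version". A minimal sketch on synthetic data, using the same window length
# (31) and polynomial order (3) as above:
import numpy as np
from scipy.signal import savgol_filter

rng = np.random.default_rng(0)
t = np.linspace(0, 2 * np.pi, 1400)
data = np.sin(t) + rng.normal(scale=0.05, size=t.shape)  # Signal plus noise.

smooth = savgol_filter(data, 31, 3)
noise = data - smooth
print(f"estimated sigma = {np.around(np.std(noise), 4)}")  # Close to 0.05.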
def number_of_uls_plot(c: Config):
    """Plot error as a function of number of unit load simulations."""
    if not c.shorten_paths:
        raise ValueError("This plot requires --shorten-paths true")
    response_type = ResponseType.YTranslation
    num_ulss = np.arange(100, 2000, 10)
    chosen_uls = 600
    point = Point(x=c.bridge.x_max - (c.bridge.length / 2), y=0, z=-8.4)
    wagen1_time = truck1.time_at(x=point.x, bridge=c.bridge)
    print_i(f"Wagen 1 time at x = {point.x:.3f} is t = {wagen1_time:.3f}")

    # Determine the reference value.
    truck_loads = flatten(
        truck1.to_point_load_pw(time=wagen1_time, bridge=c.bridge), PointLoad
    )
    print_i(f"Truck loads = {truck_loads}")
    sim_responses = load_fem_responses(
        c=c,
        response_type=response_type,
        sim_runner=OSRunner(c),
        sim_params=SimParams(ploads=truck_loads, response_types=[response_type]),
    )
    ref_value = sim_responses.at_deck(point, interp=True) * 1000
    print_i(f"Reference value = {ref_value}")

    # Collect the data.
    total_load = []
    num_loads = []
    responses = []
    for num_uls in num_ulss:
        c.il_num_loads = num_uls
        # Nested in here because it depends on the setting of 'il_num_loads'.
        truck_loads = flatten(
            truck1.to_wheel_track_loads(c=c, time=wagen1_time), PointLoad
        )
        num_loads.append(len(truck_loads))
        total_load.append(sum(map(lambda l: l.kn, truck_loads)))
        sim_responses = load_fem_responses(
            c=c,
            response_type=response_type,
            sim_runner=OSRunner(c),
            sim_params=SimParams(ploads=truck_loads, response_types=[response_type]),
        )
        responses.append(sim_responses.at_deck(point, interp=True) * 1000)

    # Plot the raw responses, then the error on the second axis.
    plt.landscape()
    # plt.plot(num_ulss, responses)
    # plt.ylabel(f"{response_type.name().lower()} (mm)")
    plt.xlabel("ULS")
    error = np.abs(np.array(responses) - ref_value).flatten() * 100
    # ax2 = plt.twinx()
    plt.plot(num_ulss, error)
    plt.ylabel("Error (%)")
    plt.title(f"Error in {response_type.name()} to Truck 1 as a function of ULS")
    # Plot the chosen number of ULS.
    chosen_error = np.interp([chosen_uls], num_ulss, error)[0]
    plt.axhline(
        chosen_error,
        label=f"At {chosen_uls} ULS, error = {np.around(chosen_error, 2)} %",
        color="black",
    )
    plt.axhline(
        0, color="red", label="Response from direct simulation (no wheel tracks)"
    )
    plt.legend()
    plt.tight_layout()
    plt.savefig(c.get_image_path("paramselection", "uls.pdf"))
    plt.close()

    # Additional verification plots.
    plt.plot(num_ulss, total_load)
    plt.savefig(c.get_image_path("paramselection", "uls-verify-total-load.pdf"))
    plt.close()
    plt.plot(num_ulss, num_loads)
    plt.savefig(c.get_image_path("paramselection", "uls-verify-num-loads.pdf"))
    plt.close()
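# The "error at the chosen ULS" above is a 1D interpolation of the error
# curve at chosen_uls. A minimal sketch with made-up numbers:
import numpy as np

num_ulss = np.array([100, 200, 300, 400])
error = np.array([4.0, 2.0, 1.0, 0.5])  # Error (%) per number of ULS.
chosen_uls = 250
chosen_error = np.interp([chosen_uls], num_ulss, error)[0]
print(chosen_error)  # 1.5, halfway between the 200 and 300 ULS errors.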
def pairwise_sensors(c: Config, dist_measure=ks_no_outliers):
    """Compare distribution of pairs of sensors under HealthyScenario."""
    normal_traffic_array, traffic_scenario = load_normal_traffic_array(c)
    response_type = ResponseType.YTranslation
    points = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, 50),
            np.linspace(c.bridge.z_min, c.bridge.z_max, 4),
        )
    ]

    bridge_scenario = HealthyScenario()
    responses = responses_to_traffic_array(
        c=c,
        traffic_array=normal_traffic_array,
        response_type=response_type,
        bridge_scenario=bridge_scenario,
        points=points,
        sim_runner=OSRunner,
    ).T
    assert len(responses) == len(points)
    ks_values_healthy = []
    for p0, point0 in enumerate(points):
        print_i(f"Point {p0 + 1} / {len(points)}")
        ks_values_healthy.append([])
        for p1, point1 in enumerate(points):
            ks = dist_measure(responses[p0], responses[p1])
            ks_values_healthy[-1].append(ks)
    plt.landscape()
    plt.imshow(ks_values_healthy)
    plt.savefig(c.get_image_path("joint-clustering", "healthy-bridge"))
    plt.close()

    bridge_scenario = each_pier_scenarios(c)[0]
    responses = responses_to_traffic_array(
        c=c,
        traffic_array=normal_traffic_array,
        response_type=response_type,
        bridge_scenario=bridge_scenario,
        points=points,
        sim_runner=OSRunner,
    ).T
    assert len(responses) == len(points)
    ks_values_damage = []
    for p0, point0 in enumerate(points):
        print_i(f"Point {p0 + 1} / {len(points)}")
        ks_values_damage.append([])
        for p1, point1 in enumerate(points):
            ks = dist_measure(responses[p0], responses[p1])
            ks_values_damage[-1].append(ks)
    plt.imshow(ks_values_damage)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge"))
    plt.close()

    ks_values_comp = []
    for p0, point0 in enumerate(points):
        ks_values_comp.append([])
        for p1, point1 in enumerate(points):
            comp = abs(ks_values_healthy[p0][p1] - ks_values_damage[p0][p1])
            ks_values_comp[-1].append(comp)
    plt.landscape()
    plt.imshow(ks_values_comp)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge-comp"))
    plt.close()

    responses = Responses.from_responses(
        response_type=response_type,
        responses=[(sum(ks_values_comp[p]), point) for p, point in enumerate(points)],
    )
    top_view_bridge(c.bridge, abutments=True, piers=True)
    plot_contour_deck(c=c, responses=responses)
    plt.savefig(c.get_image_path("joint-clustering", "scenarios-bridge-comp-contour"))
    plt.close()
def time_series_plot(c: Config, n: float):
    """Plot 24min time series of cracking, for multiple cracked bridges.

    For each bridge (hard-coded), a time series of strain responses is
    plotted. Each bridge is initially in healthy condition, and the crack
    occurs halfway through.

    Args:
        n: float, meters in front of the crack zone where to place the sensor.

    """
    # First construct one day (24 minutes) of traffic.
    total_mins = 24
    total_seconds = total_mins * 60
    traffic_scenario = normal_traffic(c=c, lam=5, min_d=2)
    traffic_sequence, traffic, traffic_array = load_traffic(
        c=c,
        traffic_scenario=traffic_scenario,
        max_time=total_seconds,
    )

    # Temperatures for one day.
    temps_day = temperature.from_to_mins(
        temperature.load("holly-springs"),
        datetime.fromisoformat("2019-07-03T00:00"),
        datetime.fromisoformat("2019-07-03T23:59"),
    )
    print(f"len temps = {len(temps_day['solar'])}")
    print(f"len temps = {len(temps_day['temp'])}")

    # Then generate some cracking time series.
    damages = [
        HealthyDamage(),
        transverse_crack(),
        transverse_crack(length=14.0, at_x=48.0),
    ]
    sensors = [
        # Sensor in middle of the lane.
        Point(x=52, z=-8.4),
        # Sensors in front of each crack zone.
        Point(x=damages[1].crack_area(c.bridge)[0] - n, z=-8.4),
        Point(x=damages[2].crack_area(c.bridge)[0] - n, z=-8.4),
    ]
    [print(f"Sensor {i} = {sensors[i]}") for i in range(len(sensors))]
    time_series = [
        crack_time_series(
            c=c,
            traffic_array=traffic_array,
            traffic_array_mins=total_mins,
            sensor=sensor,
            crack_frac=0.5,
            damage=damage,
            temps=temps_day["temp"],
            solar=temps_day["solar"],
        )
        for damage, sensor in zip(damages, sensors)
    ]

    plt.portrait()
    for i, (y_trans, strain) in enumerate(time_series):
        x = np.arange(len(strain)) * c.sensor_hz / 60
        x_m = sensors[i].x
        damage_str = "Healthy Bridge"
        if i == 1:
            damage_str = "0.5 m crack zone"
        if i == 2:
            damage_str = "14 m crack zone"

        plt.subplot(len(time_series), 2, i * 2 + 1)
        plt.plot(x, y_trans * 1000, color="tab:blue")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m\n{damage_str}")
        plt.ylabel("Y trans. (mm)")

        plt.subplot(len(time_series), 2, i * 2 + 2)
        plt.plot(x, strain * 1e6, color="tab:orange")
        if i < len(time_series) - 1:
            plt.tick_params(axis="x", bottom=False, labelbottom=False)
        else:
            plt.xlabel("Hours")
        plt.title(f"At x = {x_m} m,\n{damage_str}")
        plt.ylabel("Microstrain XXB")
    plt.tight_layout()
    plt.savefig(c.get_image_path("crack", "time-series-q5.pdf"))
    plt.close()
def plot_mmm_strain_convergence(
    c: Config,
    pier: int,
    df: pd.DataFrame,
    all_strains: Dict[float, Responses],
    title: str,
    without: Optional[Callable[[Point], bool]] = None,
    append: Optional[str] = None,
):
    """Plot convergence of the given responses as model size grows."""
    # A grid of points 1m apart, over which to calculate responses.
    grid = [
        Point(x=x, y=0, z=z)
        for x, z in itertools.product(
            np.linspace(c.bridge.x_min, c.bridge.x_max, int(c.bridge.length)),
            np.linspace(c.bridge.z_min, c.bridge.z_max, int(c.bridge.width)),
        )
    ]
    # If requested, remove some points from the responses.
    if without is not None:
        grid = [point for point in grid if not without(point)]
        for msl, strains in all_strains.items():
            print(f"Removing points from strains with max_shell_len = {msl}")
            all_strains[msl] = strains.without(without)
    # Collect responses over all simulations, and over the grid. Iterate by
    # decreasing max_shell_len.
    mins, maxes, means = [], [], []
    gmins, gmaxes, gmeans = [], [], []
    max_shell_lens = []
    for msl, strains in sorted(all_strains.items(), key=lambda kv: -kv[0]):
        max_shell_lens.append(msl)
        print_i(f"Gathering strains with max_shell_len = {msl}", end="\r")
        grid_strains = np.array(
            [strains.at_deck(point, interp=True) for point in grid]
        )
        gmins.append(scalar(np.min(grid_strains)))
        gmaxes.append(scalar(np.max(grid_strains)))
        gmeans.append(scalar(np.mean(grid_strains)))
        strains = np.array(list(strains.values()))
        mins.append(scalar(np.min(strains)))
        maxes.append(scalar(np.max(strains)))
        means.append(scalar(np.mean(strains)))
    print()

    # Normalize and plot the mins, maxes, and means.
    def normalize(ys):
        print(ys)
        return np.array(ys) / np.mean(ys[-5:])

    mins, maxes, means = normalize(mins), normalize(maxes), normalize(means)
    gmins, gmaxes, gmeans = normalize(gmins), normalize(gmaxes), normalize(gmeans)
    deck_nodes = [df.at[msl, "deck-nodes"] for msl in max_shell_lens]
    pier_nodes = [df.at[msl, "pier-nodes"] for msl in max_shell_lens]
    num_nodes = np.array(deck_nodes) + np.array(pier_nodes)
    print(f"MSLs = {max_shell_lens}")
    print(f"num_nodes = {num_nodes}")

    # Plot all lines, for debugging.
    plt.landscape()
    plt.plot(num_nodes, mins, label="mins")
    plt.plot(num_nodes, maxes, label="maxes")
    plt.plot(num_nodes, means, label="means")
    plt.plot(num_nodes, gmins, label="gmins")
    plt.plot(num_nodes, gmaxes, label="gmaxes")
    plt.plot(num_nodes, gmeans, label="gmeans")
    plt.grid(axis="y")
    plt.xlabel("Nodes in FEM")
    plt.ylabel("Strain")
    plt.title(title)
    plt.tight_layout()
    plt.legend()
    plt.savefig(
        c.get_image_path("convergence-pier-strain", f"mmm-{append}-all.pdf", acc=False)
    )
    plt.close()

    # Only plot some lines, for the thesis.
    plt.landscape()
    plt.plot(num_nodes, gmins, label="Minimum")
    plt.plot(num_nodes, gmaxes, label="Maximum")
    plt.plot(num_nodes, gmeans, label="Mean")
    plt.grid(axis="y")
    plt.title(title)
    plt.xlabel("Nodes in FEM")
    plt.ylabel("Strain")
    plt.legend()
    plt.tight_layout()
    plt.savefig(
        c.get_image_path("convergence-pier-strain", f"mmm-{append}.pdf", acc=False)
    )
    plt.close()
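# The 'normalize' helper above divides each convergence curve by the mean of
# its last five values, so curves that have converged approach 1. A minimal,
# self-contained sketch of that step:
import numpy as np


def normalize_to_tail(ys, tail: int = 5) -> np.ndarray:
    """Scale 'ys' so the mean of its last 'tail' values is 1."""
    ys = np.array(ys, dtype=float)
    return ys / np.mean(ys[-tail:])


curve = [10.0, 6.0, 4.5, 4.1, 4.0, 4.0, 4.0, 4.0]
print(normalize_to_tail(curve).round(3))  # Ends near 1.0 once converged.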